import sys from pathlib import Path import uuid import tempfile import subprocess import torch import torch.nn.functional as F import torchaudio import os from typing import Any import time from contextlib import contextmanager @contextmanager def timer(name: str): start = time.time() print(f"{name}...") yield print(f" -> {name} completed in {time.time() - start:.2f} sec") def _coerce_audio_path(audio_path: Any) -> str: # Common Gradio case: tuple where first item is the filepath if isinstance(audio_path, tuple) and len(audio_path) > 0: audio_path = audio_path[0] # Some gradio versions pass a dict-like object if isinstance(audio_path, dict): # common keys: "name", "path" audio_path = audio_path.get("name") or audio_path.get("path") # pathlib.Path etc. if not isinstance(audio_path, (str, bytes, os.PathLike)): raise TypeError(f"audio_path must be a path-like, got {type(audio_path)}: {audio_path}") return os.fspath(audio_path) def extract_audio_wav_ffmpeg(video_path: str, target_sr: int = 48000) -> str | None: """ Extract audio from a video into a temp WAV (mono, target_sr). Returns path, or None if the video has no audio stream. """ out_path = tempfile.NamedTemporaryFile(suffix=".wav", delete=False).name # Check if there's an audio stream probe_cmd = [ "ffprobe", "-v", "error", "-select_streams", "a:0", "-show_entries", "stream=codec_type", "-of", "default=nw=1:nk=1", video_path, ] try: out = subprocess.check_output(probe_cmd).decode("utf-8").strip() if not out: return None except subprocess.CalledProcessError: return None # Extract + resample + mono cmd = [ "ffmpeg", "-y", "-v", "error", "-i", video_path, "-vn", "-ac", "1", "-ar", str(int(target_sr)), "-c:a", "pcm_s16le", out_path ] subprocess.check_call(cmd) return out_path def match_audio_to_duration( audio_path: str, target_seconds: float, target_sr: int = 48000, to_mono: bool = True, pad_mode: str = "silence", # "silence" | "repeat" device: str = "cuda", ): """ Load audio, resample, (optionally) mono, then trim/pad to exactly target_seconds. 
Returns: (waveform[T] or [1,T], sr) """ audio_path = _coerce_audio_path(audio_path) wav, sr = torchaudio.load(audio_path) # [C, T] float32 CPU # Resample to target_sr (recommended so duration math is stable) if sr != target_sr: wav = torchaudio.functional.resample(wav, sr, target_sr) sr = target_sr # Mono (common expectation; if your model supports stereo, set to_mono=False) if to_mono and wav.shape[0] > 1: wav = wav.mean(dim=0, keepdim=True) # [1, T] # Exact target length in samples target_len = int(round(target_seconds * sr)) cur_len = wav.shape[-1] if cur_len > target_len: wav = wav[..., :target_len] elif cur_len < target_len: pad_len = target_len - cur_len if pad_mode == "repeat" and cur_len > 0: # Repeat then cut to exact length reps = (target_len + cur_len - 1) // cur_len wav = wav.repeat(1, reps)[..., :target_len] else: # Silence pad wav = F.pad(wav, (0, pad_len)) # move to device wav = wav.to(device, non_blocking=True) return wav, sr def sh(cmd): subprocess.check_call(cmd, shell=True) # Add packages to Python path current_dir = Path(__file__).parent sys.path.insert(0, str(current_dir / "packages" / "ltx-pipelines" / "src")) sys.path.insert(0, str(current_dir / "packages" / "ltx-core" / "src")) import spaces import flash_attn_interface import time import gradio as gr import numpy as np import random import torch from typing import Optional from pathlib import Path import torchaudio from huggingface_hub import hf_hub_download, snapshot_download from ltx_pipelines.distilled import DistilledPipeline from ltx_core.model.video_vae import TilingConfig from ltx_core.model.audio_vae.ops import AudioProcessor from ltx_core.loader.primitives import LoraPathStrengthAndSDOps from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP from ltx_pipelines.utils.constants import ( DEFAULT_SEED, DEFAULT_1_STAGE_HEIGHT, DEFAULT_1_STAGE_WIDTH , DEFAULT_NUM_FRAMES, DEFAULT_FRAME_RATE, DEFAULT_LORA_STRENGTH, ) from ltx_core.loader.single_gpu_model_builder import enable_only_lora from ltx_core.model.audio_vae import decode_audio from ltx_core.model.audio_vae import encode_audio from PIL import Image MAX_SEED = np.iinfo(np.int32).max # Import from public LTX-2 package # Install with: pip install git+https://github.com/Lightricks/LTX-2.git from ltx_pipelines.utils import ModelLedger from ltx_pipelines.utils.helpers import generate_enhanced_prompt import imageio import cv2 from controlnet_aux import CannyDetector, MidasDetector from dwpose import DwposeDetector input_image_debug_value = [None] input_audio_debug_value = [None] input_video_debug_value = [None] prompt_debug_value = [None] total_second_length_debug_value = [None] resolution_debug_value = [None] factor_debug_value = [None] allocation_time_debug_value = [None] # HuggingFace Hub defaults DEFAULT_REPO_ID = "Lightricks/LTX-2" DEFAULT_GEMMA_REPO_ID = "unsloth/gemma-3-12b-it-qat-bnb-4bit" DEFAULT_CHECKPOINT_FILENAME = "ltx-2-19b-dev.safetensors" def get_hub_or_local_checkpoint(repo_id: str, filename: str): """Download from HuggingFace Hub.""" print(f"Downloading {filename} from {repo_id}...") ckpt_path = hf_hub_download(repo_id=repo_id, filename=filename) print(f"Downloaded to {ckpt_path}") return ckpt_path def download_gemma_model(repo_id: str): """Download the full Gemma model directory.""" print(f"Downloading Gemma model from {repo_id}...") local_dir = snapshot_download(repo_id=repo_id) print(f"Gemma model downloaded to {local_dir}") return local_dir # Initialize model ledger and text encoder at startup (load once, keep in memory) print("=" * 
80) print("Loading Gemma Text Encoder...") print("=" * 80) checkpoint_path = get_hub_or_local_checkpoint(DEFAULT_REPO_ID, DEFAULT_CHECKPOINT_FILENAME) gemma_local_path = download_gemma_model(DEFAULT_GEMMA_REPO_ID) device = "cuda" print(f"Initializing text encoder with:") print(f" checkpoint_path={checkpoint_path}") print(f" gemma_root={gemma_local_path}") print(f" device={device}") model_ledger = ModelLedger( dtype=torch.bfloat16, device=device, checkpoint_path=checkpoint_path, gemma_root_path=DEFAULT_GEMMA_REPO_ID, local_files_only=False ) canny_processor = CannyDetector() # Depth (MiDaS) processor # Downloads annotator weights automatically the first time. depth_processor = MidasDetector.from_pretrained("lllyasviel/Annotators").to("cuda") # Load text encoder once and keep it in memory text_encoder = model_ledger.text_encoder() print("=" * 80) print("Text encoder loaded and ready!") print("=" * 80) def on_lora_change(selected: str): needs_video = selected in {"Pose", "Canny", "Detailer"} return ( selected, gr.update(visible=not needs_video, value=None if needs_video else None), gr.update(visible=needs_video, value=None if not needs_video else None), ) def process_video_for_pose(frames, width: int, height: int): pose_processor = DwposeDetector.from_pretrained_default() if not frames: return [] pose_frames = [] for frame in frames: # imageio frame -> PIL pil = Image.fromarray(frame.astype(np.uint8)).convert("RGB") # ✅ do NOT pass width/height here (easy_dwpose will handle drawing sizes internally) pose_img = pose_processor(pil, include_body=True, include_hand=True, include_face=True) # Ensure it's PIL then resize to your conditioning size if not isinstance(pose_img, Image.Image): # some versions might return np array pose_img = Image.fromarray(pose_img.astype(np.uint8)) pose_img = pose_img.convert("RGB").resize((width, height), Image.BILINEAR) pose_np = np.array(pose_img).astype(np.float32) / 255.0 pose_frames.append(pose_np) return pose_frames def preprocess_video_to_pose_mp4(video_path: str, width: int, height: int, fps: float): frames = load_video_frames(video_path) pose_frames = process_video_for_pose(frames, width=width, height=height) tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) tmp.close() return write_video_mp4(pose_frames, fps=fps, out_path=tmp.name) def process_video_for_depth(frames, width: int, height: int): """ Convert RGB frames -> depth map frames. Returns list of np arrays (H,W,3) float in [0..1] (controlnet-style). 
""" if not frames: return [] detect_resolution = max(frames[0].shape[0], frames[0].shape[1]) image_resolution = max(width, height) depth_frames = [] for frame in frames: # controlnet_aux MidasDetector returns float [0..1] when output_type="np" depth = depth_processor( frame, detect_resolution=detect_resolution, image_resolution=image_resolution, output_type="np", ) # Safety: ensure HWC and 3 channels (some versions may output 1ch) if depth.ndim == 2: depth = np.stack([depth, depth, depth], axis=-1) elif depth.shape[-1] == 1: depth = np.repeat(depth, 3, axis=-1) depth_frames.append(depth) return depth_frames def preprocess_video_to_depth_mp4(video_path: str, width: int, height: int, fps: float): """End-to-end: read video -> depth -> write temp mp4 -> return path.""" frames = load_video_frames(video_path) depth_frames = process_video_for_depth(frames, width=width, height=height) tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) tmp.close() return write_video_mp4(depth_frames, fps=fps, out_path=tmp.name) def load_video_frames(video_path: str): """Return list of frames as numpy arrays (H,W,3) uint8.""" frames = [] with imageio.get_reader(video_path) as reader: for frame in reader: frames.append(frame) return frames def process_video_for_canny(frames, width: int, height: int, low_threshold=20, high_threshold=60): """ Convert RGB frames -> canny edge frames. Returns list of np arrays (H,W,3) in float [0..1] (like controlnet_aux output). """ if not frames: return [] detect_resolution = max(frames[0].shape[0], frames[0].shape[1]) image_resolution = max(width, height) canny_frames = [] for frame in frames: # controlnet_aux CannyDetector returns float image in [0..1] if output_type="np" # frame_blur = cv2.GaussianBlur(frame, (3, 3), 0) canny = canny_processor( frame, low_threshold=low_threshold, high_threshold=high_threshold, detect_resolution=detect_resolution, image_resolution=image_resolution, output_type="np", ) canny_frames.append(canny) return canny_frames def write_video_mp4(frames_float_01, fps: float, out_path: str): """Write frames in float [0..1] to mp4 as uint8.""" frames_uint8 = [(f * 255).astype(np.uint8) for f in frames_float_01] # PyAV backend doesn't support `quality=...` with imageio.get_writer(out_path, fps=fps, macro_block_size=1) as writer: for fr in frames_uint8: writer.append_data(fr) return out_path def preprocess_video_to_canny_mp4(video_path: str, width: int, height: int, fps: float): """End-to-end: read video -> canny -> write temp mp4 -> return path.""" frames = load_video_frames(video_path) canny_frames = process_video_for_canny(frames, width=width, height=height) tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) tmp.close() return write_video_mp4(canny_frames, fps=fps, out_path=tmp.name) import json def probe_video_duration_seconds(video_path: str) -> float: """Return duration in seconds using ffprobe.""" cmd = [ "ffprobe", "-v", "error", "-select_streams", "v:0", "-show_entries", "format=duration", "-of", "json", video_path, ] out = subprocess.check_output(cmd).decode("utf-8") data = json.loads(out) dur = float(data["format"]["duration"]) return dur def trim_video_to_seconds_ffmpeg(video_path: str, target_seconds: float, fps: float = None) -> str: """ Trim video to [0, target_seconds]. Re-encode for accuracy & compatibility. If fps is provided, also normalize fps. Returns new temp mp4 path. 
""" target_seconds = max(0.01, float(target_seconds)) out_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name vf = [] if fps is not None: vf.append(f"fps={float(fps)}") vf_str = ",".join(vf) if vf else None cmd = ["ffmpeg", "-y", "-v", "error"] # Accurate trim: use -t and re-encode. cmd += ["-i", video_path, "-t", f"{target_seconds:.6f}"] if vf_str: cmd += ["-vf", vf_str] # Safe default encode cmd += [ "-c:v", "libx264", "-pix_fmt", "yuv420p", "-preset", "veryfast", "-crf", "18", "-an", # conditioning video doesn't need audio out_path ] subprocess.check_call(cmd) return out_path def extract_first_frame_png(video_path: str) -> str: """Extract first frame as png; returns png path.""" out_path = tempfile.NamedTemporaryFile(suffix=".png", delete=False).name cmd = [ "ffmpeg", "-y", "-v", "error", "-i", video_path, "-frames:v", "1", out_path ] subprocess.check_call(cmd) return out_path def _coerce_video_path(video_path: Any) -> str: if isinstance(video_path, tuple) and len(video_path) > 0: video_path = video_path[0] if isinstance(video_path, dict): video_path = video_path.get("name") or video_path.get("path") if not isinstance(video_path, (str, bytes, os.PathLike)): raise TypeError(f"video_path must be a path-like, got {type(video_path)}: {video_path}") return os.fspath(video_path) def prepare_conditioning_video_mp4( video_path: Any, target_num_frames: int, target_fps: float, ) -> tuple[str, str]: """ Returns (conditioning_mp4_path, first_frame_png_path). Makes an mp4 with exactly target_num_frames frames: - if source has more -> truncate - if source has fewer -> pad by repeating last frame """ video_path = _coerce_video_path(video_path) # Decode frames (robust / deterministic) frames = load_video_frames(video_path) # list of HWC uint8 frames if not frames: raise ValueError("No frames decoded from input video") # Truncate or pad to exact length if len(frames) >= target_num_frames: frames = frames[:target_num_frames] else: last = frames[-1] frames = frames + [last] * (target_num_frames - len(frames)) # Save first frame as PNG (for input_image) first_png = tempfile.NamedTemporaryFile(suffix=".png", delete=False).name Image.fromarray(frames[0]).save(first_png) # Write conditioning mp4 # write_video_mp4 expects float [0..1] frames_float = [f.astype(np.float32) / 255.0 for f in frames] cond_mp4 = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name write_video_mp4(frames_float, fps=target_fps, out_path=cond_mp4) return cond_mp4, first_png def valid_1_plus_8k(n: int) -> int: """Largest integer <= n that is of the form 1 + 8*k (k>=0).""" if n <= 0: return 0 return 1 + 8 * ((n - 1) // 8) def prepare_conditioning_video_mp4_no_pad( video_path: Any, duration_frames: int, target_fps: float, ) -> tuple[str, str, int]: """ Returns (conditioning_mp4_path, first_frame_png_path, used_num_frames) - Decodes source frames - Trims to the largest valid length (1 + 8*k) <= source length - NEVER pads / loops / repeats last frame """ video_path = _coerce_video_path(video_path) frames = load_video_frames(video_path) # list of HWC uint8 if not frames: raise ValueError("No frames decoded from input video") n_src = len(frames) n_src = min(n_src, duration_frames) n_used = valid_1_plus_8k(n_src) # If the video is extremely short (e.g. 1 frame), n_used can be 1 which is valid. 
if n_used == 0: raise ValueError(f"Video too short: {n_src} frames") frames = frames[:n_used] first_png = tempfile.NamedTemporaryFile(suffix=".png", delete=False).name Image.fromarray(frames[0]).save(first_png) frames_float = [f.astype(np.float32) / 255.0 for f in frames] cond_mp4 = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name write_video_mp4(frames_float, fps=target_fps, out_path=cond_mp4) return cond_mp4, first_png, n_used def encode_text_simple(text_encoder, prompt: str): """Simple text encoding without using pipeline_utils.""" v_context, a_context, _ = text_encoder(prompt) return v_context, a_context @spaces.GPU() def encode_prompt( prompt: str, enhance_prompt: bool = True, input_image=None, # this is now filepath (string) or None seed: int = 42, negative_prompt: str = "realistic, photograph, shaky, glitchy, low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly, transition, static" ): start_time = time.time() try: final_prompt = prompt if enhance_prompt: final_prompt = generate_enhanced_prompt( text_encoder=text_encoder, prompt=prompt, image_path=input_image if input_image is not None else None, seed=seed, ) with torch.inference_mode(): video_context, audio_context = encode_text_simple(text_encoder, final_prompt) video_context_negative = None audio_context_negative = None if negative_prompt: video_context_negative, audio_context_negative = encode_text_simple(text_encoder, negative_prompt) # IMPORTANT: return tensors directly (no torch.save) embedding_data = { "video_context": video_context.detach().cpu(), "audio_context": audio_context.detach().cpu(), "prompt": final_prompt, "original_prompt": prompt, } if video_context_negative is not None: embedding_data["video_context_negative"] = video_context_negative embedding_data["audio_context_negative"] = audio_context_negative embedding_data["negative_prompt"] = negative_prompt elapsed_time = time.time() - start_time if torch.cuda.is_available(): allocated = torch.cuda.memory_allocated() / 1024**3 peak = torch.cuda.max_memory_allocated() / 1024**3 status = f"✓ Encoded in {elapsed_time:.2f}s | VRAM: {allocated:.2f}GB allocated, {peak:.2f}GB peak" else: status = f"✓ Encoded in {elapsed_time:.2f}s (CPU mode)" return embedding_data, final_prompt, status except Exception as e: import traceback error_msg = f"Error: {str(e)}\n{traceback.format_exc()}" print(error_msg) return None, prompt, error_msg # Default prompt from docstring example DEFAULT_PROMPT = "An astronaut hatches from a fragile egg on the surface of the Moon, the shell cracking and peeling apart in gentle low-gravity motion. Fine lunar dust lifts and drifts outward with each movement, floating in slow arcs before settling back onto the ground. The astronaut pushes free in a deliberate, weightless motion, small fragments of the egg tumbling and spinning through the air. In the background, the deep darkness of space subtly shifts as stars glide with the camera's movement, emphasizing vast depth and scale. The camera performs a smooth, cinematic slow push-in, with natural parallax between the foreground dust, the astronaut, and the distant starfield. Ultra-realistic detail, physically accurate low-gravity motion, cinematic lighting, and a breath-taking, movie-like shot." 
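# --- Hedged usage sketch (comments only; nothing here runs at import time) ---
# Minimal illustration of how the dict returned by encode_prompt() above is
# consumed later in this file: the cached CPU tensors are moved back onto the
# GPU before being handed to the pipeline. The variable names below are
# hypothetical and exist only in this comment.
#
#   embeddings, final_prompt, status = encode_prompt(
#       prompt=DEFAULT_PROMPT, enhance_prompt=False, seed=DEFAULT_SEED
#   )
#   if embeddings is not None:
#       video_ctx = embeddings["video_context"].to("cuda", non_blocking=True)
#       audio_ctx = embeddings["audio_context"].to("cuda", non_blocking=True)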
# HuggingFace Hub defaults DEFAULT_REPO_ID = "Lightricks/LTX-2" DEFAULT_CHECKPOINT_FILENAME = "ltx-2-19b-dev.safetensors" DEFAULT_DISTILLED_LORA_FILENAME = "ltx-2-19b-distilled-lora-384.safetensors" DEFAULT_SPATIAL_UPSAMPLER_FILENAME = "ltx-2-spatial-upscaler-x2-1.0.safetensors" def get_hub_or_local_checkpoint(repo_id: Optional[str] = None, filename: Optional[str] = None): """Download from HuggingFace Hub or use local checkpoint.""" if repo_id is None and filename is None: raise ValueError("Please supply at least one of `repo_id` or `filename`") if repo_id is not None: if filename is None: raise ValueError("If repo_id is specified, filename must also be specified.") print(f"Downloading {filename} from {repo_id}...") ckpt_path = hf_hub_download(repo_id=repo_id, filename=filename) print(f"Downloaded to {ckpt_path}") else: ckpt_path = filename return ckpt_path # Initialize pipeline at startup print("=" * 80) print("Loading LTX-2 Distilled pipeline...") print("=" * 80) checkpoint_path = get_hub_or_local_checkpoint(DEFAULT_REPO_ID, DEFAULT_CHECKPOINT_FILENAME) spatial_upsampler_path = get_hub_or_local_checkpoint(DEFAULT_REPO_ID, DEFAULT_SPATIAL_UPSAMPLER_FILENAME) print(f"Initializing pipeline with:") print(f" checkpoint_path={checkpoint_path}") print(f" spatial_upsampler_path={spatial_upsampler_path}") distilled_lora_path = get_hub_or_local_checkpoint( DEFAULT_REPO_ID, DEFAULT_DISTILLED_LORA_FILENAME, ) distilled_lora_path = get_hub_or_local_checkpoint( DEFAULT_REPO_ID, DEFAULT_DISTILLED_LORA_FILENAME, ) static_lora_path = get_hub_or_local_checkpoint( "Lightricks/LTX-2-19b-LoRA-Camera-Control-Static", "ltx-2-19b-lora-camera-control-static.safetensors", ) dolly_in_lora_path = get_hub_or_local_checkpoint( "Lightricks/LTX-2-19b-LoRA-Camera-Control-Dolly-In", "ltx-2-19b-lora-camera-control-dolly-in.safetensors", # "MachineDelusions/LTX-2_Image2Video_Adapter_LoRa", # "LTX-2-Image2Vid-Adapter.safetensors", ) dolly_out_lora_path = get_hub_or_local_checkpoint( "Lightricks/LTX-2-19b-LoRA-Camera-Control-Dolly-Out", "ltx-2-19b-lora-camera-control-dolly-out.safetensors", ) dolly_left_lora_path = get_hub_or_local_checkpoint( "Lightricks/LTX-2-19b-LoRA-Camera-Control-Dolly-Left", "ltx-2-19b-lora-camera-control-dolly-left.safetensors", ) dolly_right_lora_path = get_hub_or_local_checkpoint( "Lightricks/LTX-2-19b-LoRA-Camera-Control-Dolly-Right", "ltx-2-19b-lora-camera-control-dolly-right.safetensors", ) jib_down_lora_path = get_hub_or_local_checkpoint( "Lightricks/LTX-2-19b-LoRA-Camera-Control-Jib-Down", "ltx-2-19b-lora-camera-control-jib-down.safetensors", ) jib_up_lora_path = get_hub_or_local_checkpoint( "Lightricks/LTX-2-19b-LoRA-Camera-Control-Jib-Up", "ltx-2-19b-lora-camera-control-jib-up.safetensors", ) detailer_lora_path = get_hub_or_local_checkpoint( # "Lightricks/LTX-2-19b-IC-LoRA-Detailer", # "ltx-2-19b-ic-lora-detailer.safetensors", "MachineDelusions/LTX-2_Image2Video_Adapter_LoRa", "LTX-2-Image2Vid-Adapter.safetensors", ) pose_lora_path = get_hub_or_local_checkpoint( "Lightricks/LTX-2-19b-IC-LoRA-Pose-Control", "ltx-2-19b-ic-lora-pose-control.safetensors", ) # Load distilled LoRA as a regular LoRA loras = [ # --- fused / base behavior --- LoraPathStrengthAndSDOps( path=distilled_lora_path, strength=0.6, sd_ops=LTXV_LORA_COMFY_RENAMING_MAP, ), LoraPathStrengthAndSDOps(static_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), LoraPathStrengthAndSDOps(detailer_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), LoraPathStrengthAndSDOps(dolly_in_lora_path, 
DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), LoraPathStrengthAndSDOps(dolly_out_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), LoraPathStrengthAndSDOps(dolly_left_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), LoraPathStrengthAndSDOps(dolly_right_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), LoraPathStrengthAndSDOps(jib_down_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), LoraPathStrengthAndSDOps(jib_up_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), LoraPathStrengthAndSDOps(pose_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), ] # Runtime-toggle LoRAs (exclude fused distilled at index 0) VISIBLE_RUNTIME_LORA_CHOICES = [ ("No LoRA", -1), ("Static", 0), ("Detailer", 1), ("Zoom In", 2), ("Zoom Out", 3), ("Slide Left", 4), ("Slide Right", 5), ("Slide Down", 6), ("Slide Up", 7), ] RUNTIME_LORA_CHOICES = [ ("No LoRA", -1), ("Static", 0), ("Detailer", 1), ("Zoom In", 2), ("Zoom Out", 3), ("Slide Left", 4), ("Slide Right", 5), ("Slide Down", 6), ("Slide Up", 7), ("Pose", 8), ] # Initialize pipeline WITHOUT text encoder (gemma_root=None) # Text encoding will be done by external space pipeline = DistilledPipeline( device=torch.device("cuda"), checkpoint_path=checkpoint_path, spatial_upsampler_path=spatial_upsampler_path, gemma_root=None, # No text encoder in this space loras=loras, fp8transformer=False, local_files_only=False, ) pipeline._video_encoder = pipeline.model_ledger.video_encoder() pipeline._transformer = pipeline.model_ledger.transformer() print("=" * 80) print("Pipeline fully loaded and ready!") print("=" * 80) class RadioAnimated(gr.HTML): """ Animated segmented radio (like iOS pill selector). Outputs: selected option string, e.g. "768x512" """ def __init__(self, choices, value=None, **kwargs): if not choices or len(choices) < 2: raise ValueError("RadioAnimated requires at least 2 choices.") if value is None: value = choices[0] uid = uuid.uuid4().hex[:8] # unique per instance group_name = f"ra-{uid}" inputs_html = "\n".join( f""" """ for i, c in enumerate(choices) ) # NOTE: use classes instead of duplicate IDs html_template = f"""
{inputs_html}
""" js_on_load = r""" (() => { const wrap = element.querySelector('.ra-wrap'); const inner = element.querySelector('.ra-inner'); const highlight = element.querySelector('.ra-highlight'); const inputs = Array.from(element.querySelectorAll('.ra-input')); const labels = Array.from(element.querySelectorAll('.ra-label')); if (!inputs.length || !labels.length) return; const choices = inputs.map(i => i.value); const PAD = 6; // must match .ra-inner padding and .ra-highlight top/left let currentIdx = 0; function setHighlightByIndex(idx) { currentIdx = idx; const lbl = labels[idx]; if (!lbl) return; const innerRect = inner.getBoundingClientRect(); const lblRect = lbl.getBoundingClientRect(); // width matches the label exactly highlight.style.width = `${lblRect.width}px`; // highlight has left: 6px, so subtract PAD to align const x = (lblRect.left - innerRect.left - PAD); highlight.style.transform = `translateX(${x}px)`; } function setCheckedByValue(val, shouldTrigger=false) { const idx = Math.max(0, choices.indexOf(val)); inputs.forEach((inp, i) => { inp.checked = (i === idx); }); // Wait a frame in case fonts/layout settle (prevents rare drift) requestAnimationFrame(() => setHighlightByIndex(idx)); props.value = choices[idx]; if (shouldTrigger) trigger('change', props.value); } // Init setCheckedByValue(props.value ?? choices[0], false); // Input handlers inputs.forEach((inp) => { inp.addEventListener('change', () => setCheckedByValue(inp.value, true)); }); // Recalc on resize (important in Gradio layouts) window.addEventListener('resize', () => setHighlightByIndex(currentIdx)); // sync from Python (Examples / backend updates) let last = props.value; const syncFromProps = () => { if (props.value !== last) { last = props.value; setCheckedByValue(last, false); } requestAnimationFrame(syncFromProps); }; requestAnimationFrame(syncFromProps); })(); """ super().__init__( value=value, html_template=html_template, js_on_load=js_on_load, **kwargs ) class PromptBox(gr.HTML): """ Prompt textarea with an internal footer slot (.ds-footer) where we can inject dropdowns. """ def __init__(self, value="", placeholder="Describe what you want...", **kwargs): uid = uuid.uuid4().hex[:8] html_template = f"""
""" js_on_load = r""" (() => { const textarea = element.querySelector(".ds-textarea"); if (!textarea) return; const autosize = () => { textarea.style.height = "0px"; textarea.style.height = Math.min(textarea.scrollHeight, 240) + "px"; }; const setValue = (v, triggerChange=false) => { const val = (v ?? ""); if (textarea.value !== val) textarea.value = val; autosize(); props.value = textarea.value; if (triggerChange) trigger("change", props.value); }; setValue(props.value, false); textarea.addEventListener("input", () => { autosize(); props.value = textarea.value; trigger("change", props.value); }); // ✅ Focus-on-load (robust) const shouldAutoFocus = () => { // don’t steal focus if user already clicked/typed somewhere const ae = document.activeElement; if (ae && ae !== document.body && ae !== document.documentElement) return false; // don’t auto-focus on small screens (optional; avoids mobile keyboard pop) if (window.matchMedia && window.matchMedia("(max-width: 768px)").matches) return false; return true; }; const focusWithRetry = (tries = 30) => { if (!shouldAutoFocus()) return; // only focus if still not focused if (document.activeElement !== textarea) textarea.focus({ preventScroll: true }); if (document.activeElement === textarea) return; if (tries > 0) requestAnimationFrame(() => focusWithRetry(tries - 1)); }; // wait a tick so Gradio/layout settles requestAnimationFrame(() => focusWithRetry()); // keep your sync loop let last = props.value; const syncFromProps = () => { if (props.value !== last) { last = props.value; setValue(last, false); } requestAnimationFrame(syncFromProps); }; requestAnimationFrame(syncFromProps); })(); """ super().__init__(value=value, html_template=html_template, js_on_load=js_on_load, **kwargs) class CameraDropdown(gr.HTML): """ Custom dropdown (More-style) with optional icons per item. Outputs: selected option string, e.g. "16:9" choices can be: - ["16:9", "1:1", "9:16"] (text only) - [{"label":"16:9","value":"16:9","icon":"…"}, ...] (icon+text) icon can be emoji or inline SVG/HTML. """ def __init__(self, choices, value="None", title="Dropdown", **kwargs): if not choices: raise ValueError("CameraDropdown requires choices.") # Normalize choices -> list of dicts: {label, value, icon(optional)} norm = [] for c in choices: if isinstance(c, dict): label = str(c.get("label", c.get("value", ""))) val = str(c.get("value", label)) icon = c.get("icon", None) # emoji or svg/html norm.append({"label": label, "value": val, "icon": icon}) else: s = str(c) norm.append({"label": s, "value": s, "icon": None}) uid = uuid.uuid4().hex[:8] def render_item(item): icon_html = "" if item["icon"]: icon_html = f'{item["icon"]}' return ( f'' ) items_html = "\n".join(render_item(item) for item in norm) html_template = f"""
""" # Pass a mapping value->label so the trigger can show label text # (and still output value to Python) value_to_label = {it["value"]: it["label"] for it in norm} value_to_icon = {it["value"]: (it["icon"] or "") for it in norm} js_on_load = r""" (() => { const wrap = element.querySelector(".cd-wrap"); const trigger = element.querySelector(".cd-trigger"); const triggerIcon = element.querySelector(".cd-trigger-icon"); const triggerText = element.querySelector(".cd-trigger-text"); const menu = element.querySelector(".cd-menu"); const items = Array.from(element.querySelectorAll(".cd-item")); if (!wrap || !trigger || !menu || !items.length) return; const valueToLabel = __VALUE_TO_LABEL__; const valueToIcon = __VALUE_TO_ICON__; const safeLabel = (v) => (valueToLabel && valueToLabel[v]) ? valueToLabel[v] : (v ?? "None"); const safeIcon = (v) => (valueToIcon && valueToIcon[v]) ? valueToIcon[v] : ""; function closeMenu() { menu.classList.remove("open"); trigger.setAttribute("aria-expanded", "false"); menu.setAttribute("aria-hidden", "true"); } function openMenu() { menu.classList.add("open"); trigger.setAttribute("aria-expanded", "true"); menu.setAttribute("aria-hidden", "false"); } function setValue(val, shouldTrigger = false) { const v = (val ?? "None"); props.value = v; // Trigger shows LABEL only (icons stay in menu) triggerText.textContent = safeLabel(v); if (triggerIcon) { triggerIcon.innerHTML = safeIcon(v); triggerIcon.style.display = safeIcon(v) ? "inline-flex" : "none"; } items.forEach(btn => { btn.dataset.selected = (btn.dataset.value === v) ? "true" : "false"; }); if (shouldTrigger) trigger("change", props.value); } trigger.addEventListener("pointerdown", (e) => { e.preventDefault(); e.stopPropagation(); if (menu.classList.contains("open")) closeMenu(); else openMenu(); }); document.addEventListener("pointerdown", (e) => { if (!wrap.contains(e.target)) closeMenu(); }, true); document.addEventListener("keydown", (e) => { if (e.key === "Escape") closeMenu(); }); wrap.addEventListener("focusout", (e) => { if (!wrap.contains(e.relatedTarget)) closeMenu(); }); items.forEach((btn) => { btn.addEventListener("pointerdown", (e) => { e.preventDefault(); e.stopPropagation(); closeMenu(); setValue(btn.dataset.value, true); }); }); // init setValue((props.value ?? 
"None"), false); // sync from Python let last = props.value; const syncFromProps = () => { if (props.value !== last) { last = props.value; setValue(last, false); } requestAnimationFrame(syncFromProps); }; requestAnimationFrame(syncFromProps); })(); """ # Inject mapping into JS safely import json js_on_load = js_on_load.replace("__VALUE_TO_LABEL__", json.dumps(value_to_label)) js_on_load = js_on_load.replace("__VALUE_TO_ICON__", json.dumps(value_to_icon)) super().__init__( value=value, html_template=html_template, js_on_load=js_on_load, **kwargs ) def generate_video_example_debug(input_image, prompt, camera_lora, resolution, radioanimated_mode, input_video, input_audio, progress=gr.Progress(track_tqdm=True)): allocation_time = None total_second_length = None if input_image_debug_value[0] is not None or input_audio_debug_value[0] is not None or input_video_debug_value[0] is not None or prompt_debug_value[0] is not None or total_second_length_debug_value[0] is not None or allocation_time_debug_value[0] is not None or resolution_debug_value[0] is not None or factor_debug_value[0] is not None: input_image = input_image_debug_value[0] input_audio = input_audio_debug_value[0] input_video = input_video_debug_value[0] prompt = prompt_debug_value[0] total_second_length = total_second_length_debug_value[0] resolution = resolution_debug_value[0] allocation_time = allocation_time_debug_value[0] return generate_video_example(input_image, prompt, camera_lora, resolution, radioanimated_mode, input_video, input_audio, total_second_length, allocation_time) def generate_video_example(input_image, prompt, camera_lora, resolution, radioanimated_mode, input_video, input_audio, total_second_length=5, allocation_time=None, progress=gr.Progress(track_tqdm=True)): w, h = apply_resolution(resolution) with timer(f'generating with video path:{input_video} with duration:{duration} and LoRA:{camera_lora} in {w}x{h}'): output_video = generate_video( input_image, prompt, 10, input_video, radioanimated_mode, True, 42, True, h, w, camera_lora, input_audio, total_second_length, allocation_time, progress ) return output_video def get_duration( input_image, prompt, duration, input_video, radioanimated_mode, enhance_prompt, seed, randomize_seed, height, width, camera_lora, audio_path, total_second_length = None, allocation_time = None, progress = None ): if allocation_time is not None: allocation_time extra_time = 0 if audio_path is not None: extra_time += 10 if input_video is not None: extra_time += 60 if duration <= 3: return 60 + extra_time elif duration <= 5: return 80 + extra_time elif duration <= 10: return 120 + extra_time else: return 180 + extra_time @spaces.GPU(duration=get_duration) def generate_video( input_image, prompt: str, duration: float, input_video = None, generation_mode = "Image-to-Video", enhance_prompt: bool = True, seed: int = 42, randomize_seed: bool = True, height: int = DEFAULT_1_STAGE_HEIGHT, width: int = DEFAULT_1_STAGE_WIDTH, camera_lora: str = "No LoRA", audio_path = None, total_second_length = None, allocation_time = None, progress=gr.Progress(track_tqdm=True), ): """ Generate a short cinematic video from a text prompt and optional input image using the LTX-2 distilled pipeline. Args: input_image: Optional input image for image-to-video. If provided, it is injected at frame 0 to guide motion. prompt: Text description of the scene, motion, and cinematic style to generate. duration: Desired video length in seconds. Converted to frames using a fixed 24 FPS rate. 
        input_video: Optional conditioning video path (mp4). If provided, motion is guided by this video.
        enhance_prompt: Whether to enhance the prompt using the prompt enhancer before encoding.
        seed: Base random seed for reproducibility (ignored if randomize_seed is True).
        randomize_seed: If True, a random seed is generated for each run.
        height: Output video height in pixels.
        width: Output video width in pixels.
        camera_lora: Camera motion control LoRA to apply during generation (enables exactly one at runtime).
        audio_path: Optional audio file for the soundtrack. Can be lip-sync audio, background music, or a mixture of both, guiding the image-to-video motion process.
        progress: Gradio progress tracker.

    Returns:
        output_path: Path to the generated MP4 video file.

    Notes:
        - Uses a fixed frame rate of 24 FPS.
        - Prompt embeddings are generated externally to avoid reloading the text encoder.
        - GPU cache is cleared after generation to reduce VRAM pressure.
        - If an input image is provided, it is temporarily saved to disk for processing.
    """
    if (camera_lora != "No LoRA" or audio_path is not None) and duration == 15:
        gr.Info("15s not available when a LoRA or lipsync is activated, reducing to 10s for this generation")
        duration = 10
    if total_second_length is not None:
        duration = total_second_length
    if audio_path is None:
        print(f'generating with duration:{duration} and LoRA:{camera_lora} in {width}x{height}')
    else:
        print(f'generating with duration:{duration} and audio in {width}x{height}')

    # Randomize seed if checkbox is enabled
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)

    # Calculate num_frames from duration (using fixed 24 fps)
    frame_rate = 24.0
    num_frames = int(duration * frame_rate) + 1  # +1 to ensure we meet the duration
    video_seconds = int(duration)

    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        output_path = tmpfile.name

    images = []
    videos = []
    if generation_mode == "Motion Control":
        if input_video is not None:
            cond_mp4, first_png, used_frames = prepare_conditioning_video_mp4_no_pad(
                video_path=input_video,
                duration_frames=num_frames,
                target_fps=frame_rate,
            )
            if input_image is None:
                images = [(first_png, 0, 1.0)]
            if audio_path is None:
                src_video_path = _coerce_video_path(input_video)
                extracted_audio_tmp = extract_audio_wav_ffmpeg(src_video_path, target_sr=48000)
                if extracted_audio_tmp is not None:
                    audio_path = extracted_audio_tmp
            with timer("Pose selected: preprocessing conditioning video to pose..."):
                cond_path = preprocess_video_to_pose_mp4(
                    video_path=cond_mp4,
                    width=width,
                    height=height,
                    fps=frame_rate,
                )
            videos = [(cond_path, 1.0)]
            camera_lora = "Pose"

    if input_image is not None:
        images = [(input_image, 0, 1.0)]

    embeddings, final_prompt, status = encode_prompt(
        prompt=prompt,
        enhance_prompt=enhance_prompt,
        input_image=input_image,
        seed=current_seed,
        negative_prompt="shaky, glitchy, low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly, transition, static",
    )
    video_context = embeddings["video_context"].to("cuda", non_blocking=True)
    audio_context = embeddings["audio_context"].to("cuda", non_blocking=True)
    print("✓ Embeddings loaded successfully")

    # free prompt enhancer / encoder temps ASAP
    del embeddings, final_prompt, status
    torch.cuda.empty_cache()

    # ✅ if user provided audio, use a neutral audio_context
    n_audio_context = None
    if audio_path is not None:
        with torch.inference_mode():
            _, n_audio_context =
encode_text_simple(text_encoder, "") # returns tensors on GPU already del audio_context audio_context = n_audio_context if len(videos) == 0: camera_lora = "Static" torch.cuda.empty_cache() # Map dropdown name -> adapter index name_to_idx = {name: idx for name, idx in RUNTIME_LORA_CHOICES} selected_idx = name_to_idx.get(camera_lora, -1) enable_only_lora(pipeline._transformer, selected_idx) torch.cuda.empty_cache() # True video duration in seconds based on your rounding video_seconds = (num_frames - 1) / frame_rate if audio_path is not None: input_waveform, input_waveform_sample_rate = match_audio_to_duration( audio_path=audio_path, target_seconds=video_seconds, target_sr=48000, # pick what your model expects; 48k is common for AV models to_mono=True, # set False if your model wants stereo pad_mode="silence", # or "repeat" if you prefer looping over silence device="cuda", ) else: input_waveform = None input_waveform_sample_rate = None with timer(f'generating with video path:{input_video} and LoRA:{camera_lora} in {width}x{height}'): with torch.inference_mode(): pipeline( prompt=prompt, output_path=str(output_path), seed=current_seed, height=height, width=width, num_frames=num_frames, frame_rate=frame_rate, images=images, video_conditioning=videos, tiling_config=TilingConfig.default(), video_context=video_context, audio_context=audio_context, input_waveform=input_waveform, input_waveform_sample_rate=input_waveform_sample_rate, ) del video_context, audio_context torch.cuda.empty_cache() print("successful generation") return str(output_path) def apply_resolution(resolution: str): if resolution == "16:9": w, h = 768, 512 elif resolution == "1:1": w, h = 512, 512 elif resolution == "9:16": w, h = 512, 768 return int(w), int(h) def apply_duration(duration: str): duration_s = int(duration[:-1]) return duration_s def on_mode_change(selected: str): is_i2v = (selected == "Image-to-Video") return gr.update(visible=not is_i2v) css = """ /* Make the row behave nicely */ #controls-row { display: none !important; align-items: center; gap: 12px; flex-wrap: nowrap; /* or wrap if you prefer on small screens */ } /* Stop these components from stretching */ #controls-row > * { flex: 0 0 auto !important; width: auto !important; min-width: 0 !important; } #col-container { margin: 0 auto; max-width: 1600px; } #modal-container { width: 100vw; /* Take full viewport width */ height: 100vh; /* Take full viewport height (optional) */ display: flex; justify-content: center; /* Center content horizontally */ align-items: center; /* Center content vertically if desired */ } #modal-content { width: 100%; max-width: 700px; /* Limit content width */ margin: 0 auto; border-radius: 8px; padding: 1.5rem; } #step-column { padding: 10px; border-radius: 8px; box-shadow: var(--card-shadow); margin: 10px; } #col-showcase { margin: 0 auto; max-width: 1100px; } .button-gradient { background: linear-gradient(45deg, rgb(255, 65, 108), rgb(255, 75, 43), rgb(255, 155, 0), rgb(255, 65, 108)) 0% 0% / 400% 400%; border: none; padding: 14px 28px; font-size: 16px; font-weight: bold; color: white; border-radius: 10px; cursor: pointer; transition: 0.3s ease-in-out; animation: 2s linear 0s infinite normal none running gradientAnimation; box-shadow: rgba(255, 65, 108, 0.6) 0px 4px 10px; } .toggle-container { display: inline-flex; background-color: #ffd6ff; /* light pink background */ border-radius: 9999px; padding: 4px; position: relative; width: fit-content; font-family: sans-serif; } .toggle-container input[type="radio"] { display: none; } 
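/* The native radio inputs above are hidden; the checked state is conveyed by the
   sliding .toggle-highlight pill and the label colour rules that follow. */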
.toggle-container label { position: relative; z-index: 2; flex: 1; text-align: center; font-weight: 700; color: #4b2ab5; /* dark purple text for unselected */ padding: 6px 22px; border-radius: 9999px; cursor: pointer; transition: color 0.25s ease; } /* Moving highlight */ .toggle-highlight { position: absolute; top: 4px; left: 4px; width: calc(50% - 4px); height: calc(100% - 8px); background-color: #4b2ab5; /* dark purple background */ border-radius: 9999px; transition: transform 0.25s ease; z-index: 1; } /* When "True" is checked */ #true:checked ~ label[for="true"] { color: #ffd6ff; /* light pink text */ } /* When "False" is checked */ #false:checked ~ label[for="false"] { color: #ffd6ff; /* light pink text */ } /* Move highlight to right side when False is checked */ #false:checked ~ .toggle-highlight { transform: translateX(100%); } /* Center items inside that row */ #mode-row{ justify-content: center !important; align-items: center !important; } /* Center the mode row contents */ #mode-row { display: flex !important; justify-content: center !important; align-items: center !important; width: 100% !important; } /* Stop Gradio from making children stretch */ #mode-row > * { flex: 0 0 auto !important; width: auto !important; min-width: 0 !important; } /* Specifically ensure the HTML component wrapper doesn't take full width */ #mode-row .gr-html, #mode-row .gradio-html, #mode-row .prose, #mode-row .block { width: auto !important; flex: 0 0 auto !important; display: inline-block !important; } /* Center the pill itself */ #radioanimated_mode { display: inline-flex !important; justify-content: center !important; width: auto !important; } """ css += """ .cd-trigger-icon{ color: rgba(255,255,255,0.9); display: inline-flex; align-items: center; justify-content: center; width: 18px; height: 18px; } .cd-trigger-icon svg { width: 18px; height: 18px; display: block; } """ css += """ /* ---- radioanimated ---- */ .ra-wrap{ width: fit-content; } .ra-inner{ position: relative; display: inline-flex; align-items: center; gap: 0; padding: 6px; background: #0b0b0b; border-radius: 9999px; overflow: hidden; user-select: none; } .ra-input{ display: none; } .ra-label{ position: relative; z-index: 2; padding: 10px 18px; font-family: ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Arial; font-size: 14px; font-weight: 600; color: rgba(255,255,255,0.7); cursor: pointer; transition: color 180ms ease; white-space: nowrap; } .ra-highlight{ position: absolute; z-index: 1; top: 6px; left: 6px; height: calc(100% - 12px); border-radius: 9999px; background: #8bff97; /* green knob */ transition: transform 200ms ease, width 200ms ease; } /* selected label becomes darker like your screenshot */ .ra-input:checked + .ra-label{ color: rgba(0,0,0,0.75); } """ css += """ .cd-icn svg{ width: 18px; height: 18px; display: block; } .cd-icn svg *{ stroke: rgba(255,255,255,0.9); } """ css += """ /* --- prompt box --- */ .ds-prompt{ width: 100%; max-width: 720px; margin-top: 3px; } .ds-textarea{ width: 100%; box-sizing: border-box; background: #2b2b2b; color: rgba(255,255,255,0.9); border: 1px solid rgba(255,255,255,0.12); border-radius: 14px; padding: 14px 16px; outline: none; font-family: ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Arial; font-size: 15px; line-height: 1.35; resize: none; min-height: 210px; max-height: 210px; overflow-y: auto; /* IMPORTANT: space for the footer controls */ padding-bottom: 72px; } .ds-card{ width: 100%; max-width: 720px; margin: 0 auto; } .ds-top{ position: relative; } /* Make 
room for footer inside textarea */ .ds-textarea{ padding-bottom: 72px; } /* Footer positioning */ .ds-footer{ position: absolute; right: 12px; bottom: 10px; display: flex; gap: 8px; align-items: center; justify-content: flex-end; z-index: 3; } /* Smaller pill buttons inside footer */ .ds-footer .cd-trigger{ min-height: 32px; padding: 6px 10px; font-size: 12px; gap: 6px; border-radius: 9999px; } .ds-footer .cd-trigger-icon, .ds-footer .cd-icn{ width: 14px; height: 14px; } .ds-footer .cd-trigger-icon svg, .ds-footer .cd-icn svg{ width: 14px; height: 14px; } .ds-footer .cd-caret{ font-size: 11px; } /* Bottom safe area bar (optional but looks nicer) */ .ds-top::after{ content: ""; position: absolute; left: 1px; right: 1px; bottom: 1px; height: 56px; background: #2b2b2b; border-bottom-left-radius: 13px; border-bottom-right-radius: 13px; pointer-events: none; z-index: 2; } """ css += """ /* ---- camera dropdown ---- */ /* 1) Fix overlap: make the Gradio HTML block shrink-to-fit when it contains a CameraDropdown. Gradio uses .gr-html for HTML components in most versions; older themes sometimes use .gradio-html. This keeps your big header HTML unaffected because it doesn't contain .cd-wrap. */ /* 2) Actual dropdown layout */ .cd-wrap{ position: relative; display: inline-block; } /* 3) Match RadioAnimated pill size/feel */ .cd-trigger{ margin-top: 2px; display: inline-flex; align-items: center; justify-content: center; gap: 10px; border: none; box-sizing: border-box; padding: 10px 18px; min-height: 52px; line-height: 1.2; border-radius: 9999px; background: #0b0b0b; font-family: ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Arial; font-size: 14px; /* ✅ match .ra-label exactly */ color: rgba(255,255,255,0.7) !important; font-weight: 600 !important; cursor: pointer; user-select: none; white-space: nowrap; } /* Ensure inner spans match too */ .cd-trigger .cd-trigger-text, .cd-trigger .cd-caret{ color: rgba(255,255,255,0.7) !important; } /* keep caret styling */ .cd-caret{ opacity: 0.8; font-weight: 900; } /* 4) Ensure menu overlays neighbors and isn't clipped */ /* Move dropdown a tiny bit up (closer to the trigger) */ .cd-menu{ position: absolute; top: calc(100% + 4px); /* was +10px */ left: 0; min-width: 240px; background: #2b2b2b; border: 1px solid rgba(255,255,255,0.14); border-radius: 14px; box-shadow: 0 18px 40px rgba(0,0,0,0.35); padding: 10px; opacity: 0; transform: translateY(-6px); pointer-events: none; transition: opacity 160ms ease, transform 160ms ease; z-index: 9999; } .cd-title{ font-size: 12px; font-weight: 600; text-transform: uppercase; letter-spacing: 0.04em; color: rgba(255,255,255,0.45); /* 👈 muted grey */ margin-bottom: 6px; padding: 0 6px; pointer-events: none; /* title is non-interactive */ } .cd-menu.open{ opacity: 1; transform: translateY(0); pointer-events: auto; } .cd-items{ display: flex; flex-direction: column; gap: 0px; /* tighter, more like a native menu */ } /* Items: NO "boxed" buttons by default */ .cd-item{ width: 100%; text-align: left; border: none; background: transparent; /* ✅ removes box look */ color: rgba(255,255,255,0.92); padding: 8px 34px 8px 12px; /* right padding leaves room for tick */ border-radius: 10px; /* only matters on hover */ cursor: pointer; font-size: 14px; font-weight: 700; position: relative; transition: background 120ms ease; } /* “Box effect” only on hover (not always) */ .cd-item:hover{ background: rgba(255,255,255,0.08); } /* Tick on the right ONLY on hover */ .cd-item::after{ content: "✓"; position: absolute; right: 12px; 
top: 50%; transform: translateY(-50%); opacity: 0; /* hidden by default */ transition: opacity 120ms ease; color: rgba(255,255,255,0.9); font-weight: 900; } /* show tick ONLY for selected item */ .cd-item[data-selected="true"]::after{ opacity: 1; } /* keep hover box effect, but no tick change */ .cd-item:hover{ background: rgba(255,255,255,0.08); } /* Kill any old “selected” styling just in case */ .cd-item.selected{ background: transparent !important; border: none !important; } """ css += """ /* icons in dropdown items */ .cd-item{ display: flex; align-items: center; gap: 10px; } .cd-icn{ display: inline-flex; align-items: center; justify-content: center; width: 18px; height: 18px; flex: 0 0 18px; } .cd-label{ flex: 1; } /* ========================= FIX: prompt border + scrollbar bleed ========================= */ /* Put the border + background on the wrapper, not the textarea */ .ds-top{ position: relative; background: #2b2b2b; border: 1px solid rgba(255,255,255,0.12); border-radius: 14px; overflow: hidden; /* ensures the footer bar is clipped to rounded corners */ } /* Make textarea "transparent" so wrapper owns the border/background */ .ds-textarea{ background: transparent !important; border: none !important; border-radius: 0 !important; /* wrapper handles radius */ outline: none; /* keep your spacing */ padding: 14px 16px; padding-bottom: 72px; /* room for footer */ width: 100%; box-sizing: border-box; /* keep scroll behavior */ overflow-y: auto; /* prevent scrollbar bleed by hiding native scrollbar */ scrollbar-width: none; /* Firefox */ } .ds-textarea::-webkit-scrollbar{ /* Chrome/Safari */ width: 0; height: 0; } /* Safe-area bar: now it matches perfectly because it's inside the same bordered wrapper */ .ds-top::after{ content: ""; position: absolute; left: 0; right: 0; bottom: 0; height: 56px; background: #2b2b2b; pointer-events: none; z-index: 2; } /* Footer above the bar */ .ds-footer{ position: absolute; right: 12px; bottom: 10px; display: flex; gap: 8px; align-items: center; justify-content: flex-end; z-index: 3; } /* Ensure textarea content sits below overlays */ .ds-textarea{ position: relative; z-index: 1; } /* ===== FIX dropdown menu being clipped/behind ===== */ /* Let the dropdown menu escape the prompt wrapper */ .ds-top{ overflow: visible !important; /* IMPORTANT: do not clip the menu */ } /* Keep the rounded "safe area" look without clipping the menu */ .ds-top::after{ left: 0 !important; right: 0 !important; bottom: 0 !important; border-bottom-left-radius: 14px !important; border-bottom-right-radius: 14px !important; } /* Ensure the footer stays above the safe-area bar */ .ds-footer{ z-index: 20 !important; } /* Make sure the opened menu is above EVERYTHING */ .ds-footer .cd-menu{ z-index: 999999 !important; } /* Sometimes Gradio/columns/cards create stacking contexts; force the whole prompt card above nearby panels */ .ds-card{ position: relative; z-index: 50; } /* --- Fix focus highlight shape (make it match rounded container) --- */ /* Kill any theme focus ring on the textarea itself */ .ds-textarea:focus, .ds-textarea:focus-visible{ outline: none !important; box-shadow: none !important; } /* Optional: if some themes apply it even when not focused */ .ds-textarea{ outline: none !important; } /* Apply the focus ring to the rounded wrapper instead */ .ds-top:focus-within{ border-color: rgba(255,255,255,0.22) !important; box-shadow: 0 0 0 3px rgba(255,255,255,0.06) !important; border-radius: 14px !important; } /* If you see any tiny square corners, ensure the wrapper 
clips its own shadow properly */ .ds-top{ border-radius: 14px !important; } /* ========================= CameraDropdown: force readable menu text in BOTH themes ========================= */ /* Menu surface */ .cd-menu{ background: #2b2b2b !important; border: 1px solid rgba(255,255,255,0.14) !important; } /* Title */ .cd-title{ color: rgba(255,255,255,0.55) !important; } #default_examples { display:none; } /* Items + all descendants (fixes spans / inherited theme colors) */ .cd-item, .cd-item *{ color: rgba(255,255,255,0.92) !important; } /* Hover state */ .cd-item:hover{ background: rgba(255,255,255,0.10) !important; } /* Checkmark */ .cd-item::after{ color: rgba(255,255,255,0.92) !important; } /* (Optional) make sure the trigger stays readable too */ .cd-trigger, .cd-trigger *{ color: rgba(255,255,255,0.75) !important; } """ with gr.Blocks(title="LTX-2 Video Distilled 🎥🔈") as demo: gr.HTML( """

LTX-2 Distilled DiT-based audio-video foundation model
[model]
Using FA3 and Gemma 3 12B 4bit Quantisation for Faster Inference
HF Space by:
""" ) with gr.Column(elem_id="col-container"): with gr.Row(elem_id="mode-row"): radioanimated_mode = RadioAnimated( choices=["Image-to-Video", "Motion Control"], value="Image-to-Video", elem_id="radioanimated_mode" ) with gr.Row(): with gr.Column(elem_id="step-column"): input_image = gr.Image( label="First Frame (Optional)", type="filepath", height=256 ) input_video = gr.Video( label="Motion Reference Video", height=256, visible=False, ) relocate = gr.HTML( value="", html_template="
", js_on_load=r""" (() => { function moveIntoFooter() { const promptRoot = document.querySelector("#prompt_ui"); if (!promptRoot) return false; const footer = promptRoot.querySelector(".ds-footer"); if (!footer) return false; const dur = document.querySelector("#duration_ui .cd-wrap"); const res = document.querySelector("#resolution_ui .cd-wrap"); const cam = document.querySelector("#camera_ui .cd-wrap"); if (!dur || !res || !cam) return false; footer.appendChild(dur); footer.appendChild(res); footer.appendChild(cam); return true; } const tick = () => { if (!moveIntoFooter()) requestAnimationFrame(tick); }; requestAnimationFrame(tick); })(); """ ) prompt_ui = PromptBox( value="Make this image come alive with cinematic motion, smooth animation", elem_id="prompt_ui", ) audio_input = gr.Audio(label="Audio (Optional)", type="filepath") prompt = gr.Textbox( label="Prompt", value="Make this image come alive with cinematic motion, smooth animation", lines=3, max_lines=3, placeholder="Describe the motion and animation you want...", visible=False ) enhance_prompt = gr.Checkbox( label="Enhance Prompt", value=True, visible=False ) with gr.Accordion("Advanced Settings", open=False, visible=False): seed = gr.Slider( label="Seed", minimum=0, maximum=MAX_SEED, value=DEFAULT_SEED, step=1 ) randomize_seed = gr.Checkbox(label="Randomize Seed", value=True) with gr.Column(elem_id="step-column"): output_video = gr.Video(label="Generated Video", autoplay=True, height=512) with gr.Row(elem_id="controls-row"): duration_ui = CameraDropdown( choices=["3s", "4s", "5s", "6s", "7s", "8s", "9s", "10s", "12s", "15s"], value="5s", title="Clip Duration", elem_id="duration_ui" ) duration = gr.Slider( label="Duration (seconds)", minimum=1.0, maximum=15.0, value=5.0, step=0.1, visible=False ) ICON_16_9 = """""" ICON_1_1 = """""" ICON_9_16 = """""" resolution_ui = CameraDropdown( choices=[ {"label": "16:9", "value": "16:9", "icon": ICON_16_9}, {"label": "1:1", "value": "1:1", "icon": ICON_1_1}, {"label": "9:16", "value": "9:16", "icon": ICON_9_16}, ], value="16:9", title="Resolution", elem_id="resolution_ui" ) width = gr.Number(label="Width", value=DEFAULT_1_STAGE_WIDTH, precision=0, visible=False) height = gr.Number(label="Height", value=DEFAULT_1_STAGE_HEIGHT, precision=0, visible=False) camera_ui = CameraDropdown( choices=[name for name, _ in VISIBLE_RUNTIME_LORA_CHOICES], value="No LoRA", title="Camera LoRA", elem_id="camera_ui", ) # Hidden real dropdown (backend value) camera_lora = gr.Dropdown( label="Camera Control LoRA", choices=[name for name, _ in VISIBLE_RUNTIME_LORA_CHOICES], value="No LoRA", visible=False ) generate_btn = gr.Button("🤩 Generate Video", variant="primary", elem_classes="button-gradient") camera_ui.change( fn=lambda x: x, inputs=camera_ui, outputs=camera_lora, api_visibility="private" ) radioanimated_mode.change( fn=on_mode_change, inputs=radioanimated_mode, outputs=[input_video], api_visibility="private", ) duration_ui.change( fn=apply_duration, inputs=duration_ui, outputs=[duration], api_visibility="private" ) resolution_ui.change( fn=apply_resolution, inputs=resolution_ui, outputs=[width, height], api_visibility="private" ) prompt_ui.change( fn=lambda x: x, inputs=prompt_ui, outputs=prompt, api_visibility="private" ) generate_btn.click( fn=generate_video, inputs=[ input_image, prompt, duration, input_video, radioanimated_mode, enhance_prompt, seed, randomize_seed, height, width, camera_lora, audio_input ], outputs=[output_video] ) with gr.Row(elem_id="default_examples"): gr.Examples( examples=[ [ 
"supergirl-2.png", "A woman starts to sleep.", "Static", "16:9", "Image-to-Video", None, None ], [ "supergirl-2.png", "A woman starts to sleep.", "Static", "16:9", "Image-to-Video", None, "supergirl.m4a" ], [ "supergirl-2.png", "A woman starts to sleep.", "Static", "1:1", "Image-to-Video", None, "supergirl.m4a" ], [ "supergirl-2.png", "A woman raises the arms.", "Static", "16:9", "Image-to-Video", None, "supergirl.m4a" ], [ "supergirl-2.png", "A woman jumps.", "Static", "16:9", "Image-to-Video", None, "supergirl.m4a" ], [ "supergirl-2.png", "A woman talks.", "Static", "16:9", "Image-to-Video", None, "supergirl.m4a" ], [ "supergirl-2.png", "A woman wakes up.", "Static", "16:9", "Image-to-Video", None, "supergirl.m4a" ], [ "supergirl-2.png", "A woman wakes up.", "Static", "9:16", "Image-to-Video", None, "supergirl.m4a" ], [ "supergirl-2.png", "A woman speaks.", "Static", "16:9", "Image-to-Video", None, None ], [ "supergirl-2.png", "A woman speaks.", "Static", "16:9", "Image-to-Video", None, None ], [ "supergirl.png", "A woman is stuck.", "No LoRA", "9:16", "Image-to-Video", None, None, ], [ "clay.png", "The woman talks", "No LoRA", "9:16", "Motion Control", "tiktok.mp4", None, ], [ "clay.png", "स्त्री कथयति", "No LoRA", "9:16", "Motion Control", "tiktok.mp4", None, ], [ "paint.png", "The woman talks", "No LoRA", "9:16", "Motion Control", "tiktok.mp4", None, ], [ "paint.png", "The woman moves", "No LoRA", "9:16", "Motion Control", "tiktok.mp4", None, ], [ "highland.png", "A cow goes closer.", "No LoRA", "16:9", "Image-to-Video", None, None, ], [ "wednesday.png", "The girl looks at us.", "Zoom Out", "16:9", "Image-to-Video", None, None, ], [ "astronaut.png", "An astronaut hatches from an egg.", "Static", "1:1", "Image-to-Video", None, None, ], ], fn=generate_video_example_debug, inputs=[input_image, prompt_ui, camera_ui, resolution_ui, radioanimated_mode, input_video, audio_input], outputs = [output_video], cache_examples=True, examples_per_page=1, ) prompt_debug=gr.Textbox(label="Prompt Debug") input_image_debug=gr.Image(type="filepath", label="Image Debug") input_audio_debug = gr.Audio(label="Audio Debug", type="filepath") input_video_debug=gr.Video(label="Video Debug") total_second_length_debug=gr.Slider(label="Duration Debug", minimum=1, maximum=120, value=11, step=0.1) resolution_debug = gr.Dropdown( choices=[ ["16:9", "16:9"], ["1:1", "1:1"], ["9:16", "9:16"], ], value="9:16", elem_id="resolution_ui", label="Resolution Debug") factor_debug=gr.Slider(label="Factor Debug", minimum=1, maximum=100, value=3.2, step=0.1) allocation_time_debug=gr.Slider(label="Allocation Debug", minimum=1, maximum=60 * 40, value=60 * 20, step=1) def handle_field_debug_change( input_image_debug_data, input_audio_debug_data, input_video_debug_data, prompt_debug_data, total_second_length_debug_data, resolution_debug_data, factor_debug_data, allocation_time_debug_data ): input_image_debug_value[0] = input_image_debug_data input_audio_debug_value[0] = input_audio_debug_data input_video_debug_value[0] = input_video_debug_data prompt_debug_value[0] = prompt_debug_data total_second_length_debug_value[0] = total_second_length_debug_data resolution_debug_value[0] = resolution_debug_data factor_debug_value[0] = factor_debug_data allocation_time_debug_value[0] = allocation_time_debug_data return [] inputs_debug=[input_image_debug, input_audio_debug, input_video_debug, prompt_debug, total_second_length_debug, resolution_debug, factor_debug, allocation_time_debug] input_image_debug.upload(fn=handle_field_debug_change, 
    input_image_debug.upload(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
    input_audio_debug.upload(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
    input_video_debug.upload(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
    prompt_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
    total_second_length_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
    resolution_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
    factor_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
    allocation_time_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])

    gr.Examples(
        examples=[
            [
                "supergirl-2.png",
                "A fuzzy puppet superhero character resembling a female puppet with blonde hair and a blue superhero suit sleeping in bed and just waking up, she gradually gets up, rubbing her eyes and looking at her dog that just popped on the bed. the scene feels chaotic, comedic, and emotional with expressive puppet reactions, cinematic lighting, smooth camera motion, shallow depth of field, and high-quality puppet-style animation",
                "Static",
                "16:9",
                "Image-to-Video",
                None,
                "supergirl.m4a",
            ],
            [
                "supergirl.png",
                "A fuzzy puppet superhero character resembling a female puppet with blonde hair and a blue superhero suit stands inside an icy cave made of frozen walls and icicles, she looks panicked and frantic, rapidly turning her head left and right and scanning the cave while waving her arms and shouting angrily and desperately, mouthing the words “where the hell is my dog,” her movements exaggerated and puppet-like with high energy and urgency, suddenly a second puppet dog bursts into frame from the side, jumping up excitedly and tackling her affectionately while licking her face repeatedly, she freezes in surprise and then breaks into relief and laughter as the dog continues licking her, the scene feels chaotic, comedic, and emotional with expressive puppet reactions, cinematic lighting, smooth camera motion, shallow depth of field, and high-quality puppet-style animation",
                "No LoRA",
                "16:9",
                "Image-to-Video",
                None,
                None,
            ],
            [
                "clay.png",
                "a character doing a tiktok dance by moving their heads side to side with dramatic lighting and cinematic effects and singing",
                "No LoRA",
                "9:16",
                "Motion Control",
                "tiktok.mp4",
                None,
            ],
            [
                "paint.png",
                "a character doing a tiktok dance by moving their heads side to side with dramatic lighting and cinematic effects and singing",
                "No LoRA",
                "9:16",
                "Motion Control",
                "tiktok.mp4",
                None,
            ],
            [
                "highland.png",
                "Realistic POV selfie-style video in a snowy, foggy field. Two shaggy Highland cows with long curved horns stand ahead. The camera is handheld and slightly shaky. The woman filming talks nervously and excitedly in a vlog tone: \"Oh my god guys… look how big those horns are… I’m kinda scared.\" The cow on the left walks toward the camera in a cute, bouncy, hopping way, curious and gentle. Snow crunches under its hooves, breath visible in the cold air. The horns look massive from the POV. As the cow gets very close, its wet nose with slight dripping fills part of the frame. She laughs nervously but reaches out and pets the cow. The cow makes deep, soft, interesting mooing and snorting sounds, calm and friendly. Ultra-realistic, natural lighting, immersive audio, documentary-style realism.",
                "No LoRA",
                "16:9",
                "Image-to-Video",
                None,
                None,
            ],
            [
                "wednesday.png",
                "A cinematic dolly out of Wednesday Addams frozen mid-dance on a dark, blue-lit ballroom floor as students move indistinctly behind her, their footsteps and muffled music reduced to a distant, underwater thrum; the audio foregrounds her steady breathing and the faint rustle of fabric as she slowly raises one arm, never breaking eye contact with the camera, then after a deliberately long silence she speaks in a flat, dry, perfectly controlled voice, “I don’t dance… I vibe code,” each word crisp and unemotional, followed by an abrupt cutoff of her voice as the background sound swells slightly, reinforcing the deadpan humor, with precise lip sync, minimal facial movement, stark gothic lighting, and cinematic realism.",
                "Zoom Out",
                "16:9",
                "Image-to-Video",
                None,
                None,
            ],
            [
                "astronaut.png",
                "An astronaut hatches from a fragile egg on the surface of the Moon, the shell cracking and peeling apart in gentle low-gravity motion. Fine lunar dust lifts and drifts outward with each movement, floating in slow arcs before settling back onto the ground. The astronaut pushes free in a deliberate, weightless motion, small fragments of the egg tumbling and spinning through the air. In the background, the deep darkness of space subtly shifts as stars glide with the camera's movement, emphasizing vast depth and scale. The camera performs a smooth, cinematic slow push-in, with natural parallax between the foreground dust, the astronaut, and the distant starfield. Ultra-realistic detail, physically accurate low-gravity motion, cinematic lighting, and a breath-taking, movie-like shot.",
                "Static",
                "1:1",
                "Image-to-Video",
                None,
                None,
            ],
        ],
        fn=generate_video_example,
        inputs=[input_image, prompt_ui, camera_ui, resolution_ui, radioanimated_mode, input_video, audio_input],
        outputs=[output_video],
        label="Examples",
        run_on_click=False,
        cache_examples=False,
    )


if __name__ == "__main__":
    demo.launch(ssr_mode=False, mcp_server=True, css=css)