diff --git "a/app.py" "b/app.py" --- "a/app.py" +++ "b/app.py" @@ -1,4 +1,6 @@ -# --- PART 1: IMPORTS & SYSTEM SETUP --- +#################################################################################################### +### PART 1: Imports & Basic Utilities +#################################################################################################### import sys from pathlib import Path import uuid @@ -12,72 +14,95 @@ from typing import Any import time from contextlib import contextmanager from gradio.helpers import create_examples -import spaces -import flash_attn_interface -import gradio as gr -import numpy as np -import random -from typing import Optional -from huggingface_hub import hf_hub_download, snapshot_download -from PIL import Image -import imageio -import cv2 -import json - -# افزودن پکیج‌های لوکال به مسیر سیستم -current_dir = Path(__file__).parent -sys.path.insert(0, str(current_dir / "packages" / "ltx-pipelines" / "src")) -sys.path.insert(0, str(current_dir / "packages" / "ltx-core" / "src")) @contextmanager def timer(name: str): start = time.time() print(f"{name}...") yield - print(f" -> {name} تکمیل شد در {time.time() - start:.2f} ثانیه") - -def sh(cmd): subprocess.check_call(cmd, shell=True) - + print(f" -> {name} completed in {time.time() - start:.2f} sec") -# --- PART 2: AUDIO HELPER FUNCTIONS --- def _coerce_audio_path(audio_path: Any) -> str: + # Common Gradio case: tuple where first item is the filepath if isinstance(audio_path, tuple) and len(audio_path) > 0: audio_path = audio_path[0] + + # Some gradio versions pass a dict-like object if isinstance(audio_path, dict): + # common keys: "name", "path" audio_path = audio_path.get("name") or audio_path.get("path") + + # pathlib.Path etc. if not isinstance(audio_path, (str, bytes, os.PathLike)): raise TypeError(f"audio_path must be a path-like, got {type(audio_path)}: {audio_path}") + return os.fspath(audio_path) + +#################################################################################################### +### PART 2: Audio Extraction & Matching Utilities +#################################################################################################### def extract_audio_wav_ffmpeg(video_path: str, target_sr: int = 48000) -> str | None: + """ + Extract audio from a video into a temp WAV (mono, target_sr). + Returns path, or None if the video has no audio stream. 
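+    The temp WAV is created with delete=False, so the caller is responsible for removing it.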
+ """ out_path = tempfile.NamedTemporaryFile(suffix=".wav", delete=False).name + + # Check if there's an audio stream probe_cmd = [ - "ffprobe", "-v", "error", "-select_streams", "a:0", - "-show_entries", "stream=codec_type", "-of", "default=nw=1:nk=1", video_path, + "ffprobe", "-v", "error", + "-select_streams", "a:0", + "-show_entries", "stream=codec_type", + "-of", "default=nw=1:nk=1", + video_path, ] try: out = subprocess.check_output(probe_cmd).decode("utf-8").strip() - if not out: return None + if not out: + return None except subprocess.CalledProcessError: return None + # Extract + resample + mono cmd = [ - "ffmpeg", "-y", "-v", "error", "-i", video_path, "-vn", "-ac", "1", - "-ar", str(int(target_sr)), "-c:a", "pcm_s16le", out_path + "ffmpeg", "-y", "-v", "error", + "-i", video_path, + "-vn", + "-ac", "1", + "-ar", str(int(target_sr)), + "-c:a", "pcm_s16le", + out_path ] subprocess.check_call(cmd) return out_path -def match_audio_to_duration(audio_path: str, target_seconds: float, target_sr: int = 48000, - to_mono: bool = True, pad_mode: str = "silence", device: str = "cuda"): +def match_audio_to_duration( + audio_path: str, + target_seconds: float, + target_sr: int = 48000, + to_mono: bool = True, + pad_mode: str = "silence", # "silence" | "repeat" + device: str = "cuda", +): + """ + Load audio, resample, (optionally) mono, then trim/pad to exactly target_seconds. + Returns: (waveform[T] or [1,T], sr) + """ audio_path = _coerce_audio_path(audio_path) - wav, sr = torchaudio.load(audio_path) + + wav, sr = torchaudio.load(audio_path) # [C, T] float32 CPU + + # Resample to target_sr (recommended so duration math is stable) if sr != target_sr: wav = torchaudio.functional.resample(wav, sr, target_sr) sr = target_sr + + # Mono (common expectation; if your model supports stereo, set to_mono=False) if to_mono and wav.shape[0] > 1: - wav = wav.mean(dim=0, keepdim=True) - + wav = wav.mean(dim=0, keepdim=True) # [1, T] + + # Exact target length in samples target_len = int(round(target_seconds * sr)) cur_len = wav.shape[-1] @@ -86,101 +111,106 @@ def match_audio_to_duration(audio_path: str, target_seconds: float, target_sr: i elif cur_len < target_len: pad_len = target_len - cur_len if pad_mode == "repeat" and cur_len > 0: + # Repeat then cut to exact length reps = (target_len + cur_len - 1) // cur_len wav = wav.repeat(1, reps)[..., :target_len] else: + # Silence pad wav = F.pad(wav, (0, pad_len)) - return wav.to(device, non_blocking=True), sr - - -# --- PART 3: VIDEO HELPER FUNCTIONS (FFMPEG) --- -def probe_video_duration_seconds(video_path: str) -> float: - cmd = [ - "ffprobe", "-v", "error", "-select_streams", "v:0", - "-show_entries", "format=duration", "-of", "json", video_path, - ] - out = subprocess.check_output(cmd).decode("utf-8") - return float(json.loads(out)["format"]["duration"]) - -def trim_video_to_seconds_ffmpeg(video_path: str, target_seconds: float, fps: float = None) -> str: - target_seconds = max(0.01, float(target_seconds)) - out_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name - vf = [] - if fps is not None: vf.append(f"fps={float(fps)}") - vf_str = ",".join(vf) if vf else None - cmd = ["ffmpeg", "-y", "-v", "error", "-i", video_path, "-t", f"{target_seconds:.6f}"] - if vf_str: cmd += ["-vf", vf_str] - cmd += ["-c:v", "libx264", "-pix_fmt", "yuv420p", "-preset", "veryfast", "-crf", "18", "-an", out_path] - subprocess.check_call(cmd) - return out_path -def extract_first_frame_png(video_path: str) -> str: - out_path = 
tempfile.NamedTemporaryFile(suffix=".png", delete=False).name - cmd = ["ffmpeg", "-y", "-v", "error", "-i", video_path, "-frames:v", "1", out_path] - subprocess.check_call(cmd) - return out_path + # move to device + wav = wav.to(device, non_blocking=True) + return wav, sr -def load_video_frames(video_path: str): - frames = [] - with imageio.get_reader(video_path) as reader: - for frame in reader: - frames.append(frame) - return frames +def sh(cmd): subprocess.check_call(cmd, shell=True) -def write_video_mp4(frames_float_01, fps: float, out_path: str): - frames_uint8 = [(f * 255).astype(np.uint8) for f in frames_float_01] - with imageio.get_writer(out_path, fps=fps, macro_block_size=1) as writer: - for fr in frames_uint8: - writer.append_data(fr) - return out_path +#################################################################################################### +### PART 3: LTX Pipeline Imports & Constants +#################################################################################################### +# Add packages to Python path +current_dir = Path(__file__).parent +sys.path.insert(0, str(current_dir / "packages" / "ltx-pipelines" / "src")) +sys.path.insert(0, str(current_dir / "packages" / "ltx-core" / "src")) -# --- PART 4: LTX PIPELINE IMPORTS & CONSTANTS --- +import spaces +import flash_attn_interface +import time +import gradio as gr +import numpy as np +import random +import torch +from typing import Optional +from pathlib import Path +import torchaudio +from huggingface_hub import hf_hub_download, snapshot_download from ltx_pipelines.distilled import DistilledPipeline from ltx_core.model.video_vae import TilingConfig from ltx_core.model.audio_vae.ops import AudioProcessor from ltx_core.loader.primitives import LoraPathStrengthAndSDOps from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP from ltx_pipelines.utils.constants import ( - DEFAULT_SEED, DEFAULT_1_STAGE_HEIGHT, DEFAULT_1_STAGE_WIDTH, - DEFAULT_NUM_FRAMES, DEFAULT_FRAME_RATE, DEFAULT_LORA_STRENGTH, + DEFAULT_SEED, + DEFAULT_1_STAGE_HEIGHT, + DEFAULT_1_STAGE_WIDTH , + DEFAULT_NUM_FRAMES, + DEFAULT_FRAME_RATE, + DEFAULT_LORA_STRENGTH, ) from ltx_core.loader.single_gpu_model_builder import enable_only_lora -from ltx_core.model.audio_vae import decode_audio, encode_audio +from ltx_core.model.audio_vae import decode_audio +from ltx_core.model.audio_vae import encode_audio +from PIL import Image + +MAX_SEED = np.iinfo(np.int32).max +# Import from public LTX-2 package +# Install with: pip install git+https://github.com/Lightricks/LTX-2.git from ltx_pipelines.utils import ModelLedger from ltx_pipelines.utils.helpers import generate_enhanced_prompt -from controlnet_aux import CannyDetector, MidasDetector -from dwpose import DwposeDetector +import imageio +import cv2 -MAX_SEED = np.iinfo(np.int32).max +# HuggingFace Hub defaults DEFAULT_REPO_ID = "Lightricks/LTX-2" DEFAULT_GEMMA_REPO_ID = "unsloth/gemma-3-12b-it-qat-bnb-4bit" DEFAULT_CHECKPOINT_FILENAME = "ltx-2-19b-dev.safetensors" -# --- PART 5: MODEL DOWNLOAD & CACHE LOGIC --- +#################################################################################################### +### PART 4: Download Helpers +#################################################################################################### def get_hub_or_local_checkpoint(repo_id: str, filename: str): - print(f"در حال دانلود {filename} از {repo_id}...") + """Download from HuggingFace Hub.""" + print(f"Downloading {filename} from {repo_id}...") ckpt_path = hf_hub_download(repo_id=repo_id, 
filename=filename) - print(f"دانلود شد در: {ckpt_path}") + print(f"Downloaded to {ckpt_path}") return ckpt_path def download_gemma_model(repo_id: str): - print(f"در حال دانلود مدل Gemma از {repo_id}...") + """Download the full Gemma model directory.""" + print(f"Downloading Gemma model from {repo_id}...") local_dir = snapshot_download(repo_id=repo_id) - print(f"مدل Gemma دانلود شد در: {local_dir}") + print(f"Gemma model downloaded to {local_dir}") return local_dir -# --- PART 6: GLOBAL MODEL LOADING (TEXT ENCODER) --- +#################################################################################################### +### PART 5: Text Encoder Initialization +#################################################################################################### +# Initialize model ledger and text encoder at startup (load once, keep in memory) print("=" * 80) -print("در حال بارگذاری Text Encoder...") +print("Loading Gemma Text Encoder...") print("=" * 80) checkpoint_path = get_hub_or_local_checkpoint(DEFAULT_REPO_ID, DEFAULT_CHECKPOINT_FILENAME) gemma_local_path = download_gemma_model(DEFAULT_GEMMA_REPO_ID) device = "cuda" +print(f"Initializing text encoder with:") +print(f" checkpoint_path={checkpoint_path}") +print(f" gemma_root={gemma_local_path}") +print(f" device={device}") + model_ledger = ModelLedger( dtype=torch.bfloat16, device=device, @@ -188,177 +218,370 @@ model_ledger = ModelLedger( gemma_root_path=DEFAULT_GEMMA_REPO_ID, local_files_only=False ) + +# Load text encoder once and keep it in memory text_encoder = model_ledger.text_encoder() -print("Text encoder آماده است!") - - -# --- PART 7: CONTROLNET & POSE PRE-PROCESSORS --- -canny_processor = CannyDetector() -depth_processor = MidasDetector.from_pretrained("lllyasviel/Annotators").to("cuda") - -def process_video_for_pose(frames, width: int, height: int): - pose_processor = DwposeDetector.from_pretrained_default() - if not frames: return [] - pose_frames = [] - for frame in frames: - pil = Image.fromarray(frame.astype(np.uint8)).convert("RGB") - pose_img = pose_processor(pil, include_body=True, include_hand=True, include_face=True) - if not isinstance(pose_img, Image.Image): - pose_img = Image.fromarray(pose_img.astype(np.uint8)) - pose_img = pose_img.convert("RGB").resize((width, height), Image.BILINEAR) - pose_np = np.array(pose_img).astype(np.float32) / 255.0 - pose_frames.append(pose_np) - return pose_frames - - -# --- PART 8: VIDEO PRE-PROCESSING (POSE/DEPTH/CANNY) --- -def preprocess_video_to_pose_mp4(video_path: str, width: int, height: int, fps: float): - frames = load_video_frames(video_path) - pose_frames = process_video_for_pose(frames, width=width, height=height) - tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) - tmp.close() - return write_video_mp4(pose_frames, fps=fps, out_path=tmp.name) - -def process_video_for_depth(frames, width: int, height: int): - if not frames: return [] - detect_resolution = max(frames[0].shape[0], frames[0].shape[1]) - image_resolution = max(width, height) - depth_frames = [] - for frame in frames: - depth = depth_processor(frame, detect_resolution=detect_resolution, image_resolution=image_resolution, output_type="np") - if depth.ndim == 2: depth = np.stack([depth, depth, depth], axis=-1) - elif depth.shape[-1] == 1: depth = np.repeat(depth, 3, axis=-1) - depth_frames.append(depth) - return depth_frames - -def preprocess_video_to_depth_mp4(video_path: str, width: int, height: int, fps: float): - frames = load_video_frames(video_path) - depth_frames = 
process_video_for_depth(frames, width=width, height=height) - tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) - tmp.close() - return write_video_mp4(depth_frames, fps=fps, out_path=tmp.name) - -def process_video_for_canny(frames, width: int, height: int, low_threshold=20, high_threshold=60): - if not frames: return [] - detect_resolution = max(frames[0].shape[0], frames[0].shape[1]) - image_resolution = max(width, height) - canny_frames = [] - for frame in frames: - canny = canny_processor(frame, low_threshold=low_threshold, high_threshold=high_threshold, - detect_resolution=detect_resolution, image_resolution=image_resolution, output_type="np") - canny_frames.append(canny) - return canny_frames - - -# --- PART 9: CONDITIONING VIDEO PREPARATION --- + +print("=" * 80) +print("Text encoder loaded and ready!") +print("=" * 80) + +def on_lora_change(selected: str): + # Only Detailer might need handling, but we simplified motion control out + needs_video = selected in {"Detailer"} + return ( + selected, + gr.update(visible=not needs_video, value=None if needs_video else None), + gr.update(visible=needs_video, value=None if not needs_video else None), + ) + + +#################################################################################################### +### PART 6: Video/Frame Utilities +#################################################################################################### +def load_video_frames(video_path: str): + """Return list of frames as numpy arrays (H,W,3) uint8.""" + frames = [] + with imageio.get_reader(video_path) as reader: + for frame in reader: + frames.append(frame) + return frames + +def write_video_mp4(frames_float_01, fps: float, out_path: str): + """Write frames in float [0..1] to mp4 as uint8.""" + frames_uint8 = [(f * 255).astype(np.uint8) for f in frames_float_01] + + # PyAV backend doesn't support `quality=...` + with imageio.get_writer(out_path, fps=fps, macro_block_size=1) as writer: + for fr in frames_uint8: + writer.append_data(fr) + return out_path + +import json + +def probe_video_duration_seconds(video_path: str) -> float: + """Return duration in seconds using ffprobe.""" + cmd = [ + "ffprobe", "-v", "error", + "-select_streams", "v:0", + "-show_entries", "format=duration", + "-of", "json", + video_path, + ] + out = subprocess.check_output(cmd).decode("utf-8") + data = json.loads(out) + dur = float(data["format"]["duration"]) + return dur + + +#################################################################################################### +### PART 7: FFmpeg Utils & Frame Prep +#################################################################################################### +def trim_video_to_seconds_ffmpeg(video_path: str, target_seconds: float, fps: float = None) -> str: + """ + Trim video to [0, target_seconds]. Re-encode for accuracy & compatibility. + If fps is provided, also normalize fps. + Returns new temp mp4 path. + """ + target_seconds = max(0.01, float(target_seconds)) + + out_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name + + vf = [] + if fps is not None: + vf.append(f"fps={float(fps)}") + vf_str = ",".join(vf) if vf else None + + cmd = ["ffmpeg", "-y", "-v", "error"] + + # Accurate trim: use -t and re-encode. 
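+    # (`-t` placed after `-i` trims on the output side; combined with re-encoding the
+    # cut is frame-accurate, whereas a stream-copy trim would snap to the nearest keyframe.)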
+ cmd += ["-i", video_path, "-t", f"{target_seconds:.6f}"] + + if vf_str: + cmd += ["-vf", vf_str] + + # Safe default encode + cmd += [ + "-c:v", "libx264", "-pix_fmt", "yuv420p", "-preset", "veryfast", "-crf", "18", + "-an", # conditioning video doesn't need audio + out_path + ] + + subprocess.check_call(cmd) + return out_path + +def extract_first_frame_png(video_path: str) -> str: + """Extract first frame as png; returns png path.""" + out_path = tempfile.NamedTemporaryFile(suffix=".png", delete=False).name + cmd = [ + "ffmpeg", "-y", "-v", "error", + "-i", video_path, + "-frames:v", "1", + out_path + ] + subprocess.check_call(cmd) + return out_path + def _coerce_video_path(video_path: Any) -> str: - if isinstance(video_path, tuple) and len(video_path) > 0: video_path = video_path[0] - if isinstance(video_path, dict): video_path = video_path.get("name") or video_path.get("path") + if isinstance(video_path, tuple) and len(video_path) > 0: + video_path = video_path[0] + if isinstance(video_path, dict): + video_path = video_path.get("name") or video_path.get("path") if not isinstance(video_path, (str, bytes, os.PathLike)): raise TypeError(f"video_path must be a path-like, got {type(video_path)}: {video_path}") return os.fspath(video_path) -def valid_1_plus_8k(n: int) -> int: - if n <= 0: return 0 - return 1 + 8 * ((n - 1) // 8) - -def prepare_conditioning_video_mp4_no_pad(video_path: Any, duration_frames: int, target_fps: float) -> tuple[str, str, int]: - video_path = _coerce_video_path(video_path) - frames = load_video_frames(video_path) - if not frames: raise ValueError("فریم‌های ویدیو بارگذاری نشد.") - n_src = min(len(frames), duration_frames) - n_used = valid_1_plus_8k(n_src) - if n_used == 0: raise ValueError(f"ویدیو خیلی کوتاه است: {n_src} فریم") - frames = frames[:n_used] - first_png = tempfile.NamedTemporaryFile(suffix=".png", delete=False).name - Image.fromarray(frames[0]).save(first_png) - frames_float = [f.astype(np.float32) / 255.0 for f in frames] - cond_mp4 = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name - write_video_mp4(frames_float, fps=target_fps, out_path=cond_mp4) - return cond_mp4, first_png, n_used +#################################################################################################### +### PART 8: Prompt Encoding Logic +#################################################################################################### def encode_text_simple(text_encoder, prompt: str): + """Simple text encoding without using pipeline_utils.""" v_context, a_context, _ = text_encoder(prompt) return v_context, a_context @spaces.GPU() -def encode_prompt(prompt: str, enhance_prompt: bool = True, input_image=None, seed: int = 42, negative_prompt: str = ""): +def encode_prompt( + prompt: str, + enhance_prompt: bool = True, + input_image=None, # this is now filepath (string) or None + seed: int = 42, + negative_prompt: str = "" +): start_time = time.time() try: final_prompt = prompt if enhance_prompt: - final_prompt = generate_enhanced_prompt(text_encoder=text_encoder, prompt=prompt, - image_path=input_image, seed=seed) + final_prompt = generate_enhanced_prompt( + text_encoder=text_encoder, + prompt=prompt, + image_path=input_image if input_image is not None else None, + seed=seed, + ) + with torch.inference_mode(): video_context, audio_context = encode_text_simple(text_encoder, final_prompt) - + + video_context_negative = None + audio_context_negative = None + if negative_prompt: + video_context_negative, audio_context_negative = encode_text_simple(text_encoder, 
negative_prompt) + + # IMPORTANT: return tensors directly (no torch.save) embedding_data = { "video_context": video_context.detach().cpu(), "audio_context": audio_context.detach().cpu(), "prompt": final_prompt, + "original_prompt": prompt, } - status = f"✓ انکود شده در {time.time() - start_time:.2f} ثانیه" + if video_context_negative is not None: + embedding_data["video_context_negative"] = video_context_negative + embedding_data["audio_context_negative"] = audio_context_negative + embedding_data["negative_prompt"] = negative_prompt + + elapsed_time = time.time() - start_time + if torch.cuda.is_available(): + allocated = torch.cuda.memory_allocated() / 1024**3 + peak = torch.cuda.max_memory_allocated() / 1024**3 + status = f"✓ Encoded in {elapsed_time:.2f}s | VRAM: {allocated:.2f}GB allocated, {peak:.2f}GB peak" + else: + status = f"✓ Encoded in {elapsed_time:.2f}s (CPU mode)" + return embedding_data, final_prompt, status + except Exception as e: - return None, prompt, f"خطا: {str(e)}" + import traceback + error_msg = f"Error: {str(e)}\n{traceback.format_exc()}" + print(error_msg) + return None, prompt, error_msg +# Default prompt from docstring example +DEFAULT_PROMPT = "An astronaut hatches from a fragile egg on the surface of the Moon, the shell cracking and peeling apart in gentle low-gravity motion. Fine lunar dust lifts and drifts outward with each movement, floating in slow arcs before settling back onto the ground. The astronaut pushes free in a deliberate, weightless motion, small fragments of the egg tumbling and spinning through the air. In the background, the deep darkness of space subtly shifts as stars glide with the camera's movement, emphasizing vast depth and scale. The camera performs a smooth, cinematic slow push-in, with natural parallax between the foreground dust, the astronaut, and the distant starfield. Ultra-realistic detail, physically accurate low-gravity motion, cinematic lighting, and a breath-taking, movie-like shot." 
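+# Illustrative usage sketch for the dict returned by encode_prompt() above
+# (the prompt string and variable names here are hypothetical):
+#   data, final_prompt, status = encode_prompt("a red fox at dawn", enhance_prompt=False)
+#   if data is not None:
+#       video_ctx = data["video_context"].to("cuda")   # text conditioning, video branch
+#       audio_ctx = data["audio_context"].to("cuda")   # text conditioning, audio branch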
-# --- PART 10: PIPELINE INITIALIZATION (DiT & LoRAs) --- -print("=" * 80) -print("در حال بارگذاری پایپ‌لاین LTX-2 Distilled...") -print("=" * 80) +#################################################################################################### +### PART 9: Pipeline Constants & Checkpoint Paths +#################################################################################################### +# HuggingFace Hub defaults +DEFAULT_REPO_ID = "Lightricks/LTX-2" +DEFAULT_CHECKPOINT_FILENAME = "ltx-2-19b-dev.safetensors" DEFAULT_DISTILLED_LORA_FILENAME = "ltx-2-19b-distilled-lora-384.safetensors" DEFAULT_SPATIAL_UPSAMPLER_FILENAME = "ltx-2-spatial-upscaler-x2-1.0.safetensors" -spatial_upsampler_path = get_hub_or_local_checkpoint(DEFAULT_REPO_ID, DEFAULT_SPATIAL_UPSAMPLER_FILENAME) -distilled_lora_path = get_hub_or_local_checkpoint(DEFAULT_REPO_ID, DEFAULT_DISTILLED_LORA_FILENAME) +def get_hub_or_local_checkpoint_pipeline(repo_id: Optional[str] = None, filename: Optional[str] = None): + """Download from HuggingFace Hub or use local checkpoint.""" + if repo_id is None and filename is None: + raise ValueError("Please supply at least one of `repo_id` or `filename`") + + if repo_id is not None: + if filename is None: + raise ValueError("If repo_id is specified, filename must also be specified.") + print(f"Downloading {filename} from {repo_id}...") + ckpt_path = hf_hub_download(repo_id=repo_id, filename=filename) + print(f"Downloaded to {ckpt_path}") + else: + ckpt_path = filename + + return ckpt_path + + +# Initialize pipeline at startup +print("=" * 80) +print("Loading LTX-2 Distilled pipeline...") +print("=" * 80) + +checkpoint_path = get_hub_or_local_checkpoint_pipeline(DEFAULT_REPO_ID, DEFAULT_CHECKPOINT_FILENAME) +spatial_upsampler_path = get_hub_or_local_checkpoint_pipeline(DEFAULT_REPO_ID, DEFAULT_SPATIAL_UPSAMPLER_FILENAME) + +print(f"Initializing pipeline with:") +print(f" checkpoint_path={checkpoint_path}") +print(f" spatial_upsampler_path={spatial_upsampler_path}") + +distilled_lora_path = get_hub_or_local_checkpoint_pipeline( + DEFAULT_REPO_ID, + DEFAULT_DISTILLED_LORA_FILENAME, +) + +static_lora_path = get_hub_or_local_checkpoint_pipeline( + "Lightricks/LTX-2-19b-LoRA-Camera-Control-Static", + "ltx-2-19b-lora-camera-control-static.safetensors", +) +dolly_in_lora_path = get_hub_or_local_checkpoint_pipeline( + "Lightricks/LTX-2-19b-LoRA-Camera-Control-Dolly-In", + "ltx-2-19b-lora-camera-control-dolly-in.safetensors", +) +dolly_out_lora_path = get_hub_or_local_checkpoint_pipeline( + "Lightricks/LTX-2-19b-LoRA-Camera-Control-Dolly-Out", + "ltx-2-19b-lora-camera-control-dolly-out.safetensors", +) +dolly_left_lora_path = get_hub_or_local_checkpoint_pipeline( + "Lightricks/LTX-2-19b-LoRA-Camera-Control-Dolly-Left", + "ltx-2-19b-lora-camera-control-dolly-left.safetensors", +) +dolly_right_lora_path = get_hub_or_local_checkpoint_pipeline( + "Lightricks/LTX-2-19b-LoRA-Camera-Control-Dolly-Right", + "ltx-2-19b-lora-camera-control-dolly-right.safetensors", +) +jib_down_lora_path = get_hub_or_local_checkpoint_pipeline( + "Lightricks/LTX-2-19b-LoRA-Camera-Control-Jib-Down", + "ltx-2-19b-lora-camera-control-jib-down.safetensors", +) +jib_up_lora_path = get_hub_or_local_checkpoint_pipeline( + "Lightricks/LTX-2-19b-LoRA-Camera-Control-Jib-Up", + "ltx-2-19b-lora-camera-control-jib-up.safetensors", +) + +detailer_lora_path = get_hub_or_local_checkpoint_pipeline( + "Lightricks/LTX-2-19b-IC-LoRA-Detailer", + "ltx-2-19b-ic-lora-detailer.safetensors", +) -# دانلود و تنظیم مسیر LoRAها -def get_lora(repo, 
name): return get_hub_or_local_checkpoint(repo, name) +#################################################################################################### +### PART 10: LoRA Setup & Configuration +#################################################################################################### +# Load distilled LoRA as a regular LoRA loras = [ - LoraPathStrengthAndSDOps(distilled_lora_path, 0.6, LTXV_LORA_COMFY_RENAMING_MAP), - LoraPathStrengthAndSDOps(get_lora("Lightricks/LTX-2-19b-LoRA-Camera-Control-Static", "ltx-2-19b-lora-camera-control-static.safetensors"), DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), - LoraPathStrengthAndSDOps(get_lora("Lightricks/LTX-2-19b-IC-LoRA-Detailer", "ltx-2-19b-ic-lora-detailer.safetensors"), DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), - LoraPathStrengthAndSDOps(get_lora("Lightricks/LTX-2-19b-LoRA-Camera-Control-Dolly-In", "ltx-2-19b-lora-camera-control-dolly-in.safetensors"), DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), - LoraPathStrengthAndSDOps(get_lora("Lightricks/LTX-2-19b-LoRA-Camera-Control-Dolly-Out", "ltx-2-19b-lora-camera-control-dolly-out.safetensors"), DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), - LoraPathStrengthAndSDOps(get_lora("Lightricks/LTX-2-19b-LoRA-Camera-Control-Dolly-Left", "ltx-2-19b-lora-camera-control-dolly-left.safetensors"), DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), - LoraPathStrengthAndSDOps(get_lora("Lightricks/LTX-2-19b-LoRA-Camera-Control-Dolly-Right", "ltx-2-19b-lora-camera-control-dolly-right.safetensors"), DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), - LoraPathStrengthAndSDOps(get_lora("Lightricks/LTX-2-19b-LoRA-Camera-Control-Jib-Down", "ltx-2-19b-lora-camera-control-jib-down.safetensors"), DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), - LoraPathStrengthAndSDOps(get_lora("Lightricks/LTX-2-19b-LoRA-Camera-Control-Jib-Up", "ltx-2-19b-lora-camera-control-jib-up.safetensors"), DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), - LoraPathStrengthAndSDOps(get_lora("Lightricks/LTX-2-19b-IC-LoRA-Pose-Control", "ltx-2-19b-ic-lora-pose-control.safetensors"), DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), + # --- fused / base behavior --- + LoraPathStrengthAndSDOps( + path=distilled_lora_path, + strength=0.6, + sd_ops=LTXV_LORA_COMFY_RENAMING_MAP, + ), + LoraPathStrengthAndSDOps(static_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), + LoraPathStrengthAndSDOps(detailer_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), + LoraPathStrengthAndSDOps(dolly_in_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), + LoraPathStrengthAndSDOps(dolly_out_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), + LoraPathStrengthAndSDOps(dolly_left_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), + LoraPathStrengthAndSDOps(dolly_right_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), + LoraPathStrengthAndSDOps(jib_down_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), + LoraPathStrengthAndSDOps(jib_up_lora_path, DEFAULT_LORA_STRENGTH, LTXV_LORA_COMFY_RENAMING_MAP), +] + +# Runtime-toggle LoRAs (exclude fused distilled at index 0) +VISIBLE_RUNTIME_LORA_CHOICES = [ + ("No LoRA", -1), + ("Static", 0), + ("Detailer", 1), + ("Zoom In", 2), + ("Zoom Out", 3), + ("Slide Left", 4), + ("Slide Right", 5), + ("Slide Down", 6), + ("Slide Up", 7), ] -# لیست انتخابی برای UI (فارسی به مقدار عددی) RUNTIME_LORA_CHOICES = [ - ("هیچکدام", -1), ("ثابت (Static)", 0), ("جزئیات‌دهنده 
(Detailer)", 1), - ("زوم به داخل", 2), ("زوم به عقب", 3), ("حرکت به چپ", 4), - ("حرکت به راست", 5), ("حرکت به پایین", 6), ("حرکت به بالا", 7), ("ژست (Pose)", 8), + ("No LoRA", -1), + ("Static", 0), + ("Detailer", 1), + ("Zoom In", 2), + ("Zoom Out", 3), + ("Slide Left", 4), + ("Slide Right", 5), + ("Slide Down", 6), + ("Slide Up", 7), ] + +#################################################################################################### +### PART 11: Pipeline Initialization +#################################################################################################### +# Initialize pipeline WITHOUT text encoder (gemma_root=None) +# Text encoding will be done by external space pipeline = DistilledPipeline( - device=torch.device("cuda"), checkpoint_path=checkpoint_path, spatial_upsampler_path=spatial_upsampler_path, - gemma_root=None, loras=loras, fp8transformer=False, local_files_only=False, + device=torch.device("cuda"), + checkpoint_path=checkpoint_path, + spatial_upsampler_path=spatial_upsampler_path, + gemma_root=None, # No text encoder in this space + loras=loras, + fp8transformer=False, + local_files_only=False, ) + pipeline._video_encoder = pipeline.model_ledger.video_encoder() pipeline._transformer = pipeline.model_ledger.transformer() -print("پایپ‌لاین آماده است!") +print("=" * 80) +print("Pipeline fully loaded and ready!") +print("=" * 80) -# --- PART 11: CUSTOM COMPONENT - RadioAnimated (JS/HTML) --- + +#################################################################################################### +### PART 12: Custom Component - RadioAnimated +#################################################################################################### class RadioAnimated(gr.HTML): + """ + Animated segmented radio (like iOS pill selector). + Outputs: selected option string, e.g. "768x512" + """ def __init__(self, choices, value=None, **kwargs): - if value is None: value = choices[0] - uid = uuid.uuid4().hex[:8] + if not choices or len(choices) < 2: + raise ValueError("RadioAnimated requires at least 2 choices.") + if value is None: + value = choices[0] + + uid = uuid.uuid4().hex[:8] # unique per instance group_name = f"ra-{uid}" + inputs_html = "\n".join( - f'' - f'' + f""" + + + """ for i, c in enumerate(choices) ) - html_template = f'
{inputs_html}
' - + + # NOTE: use classes instead of duplicate IDs + html_template = f""" +
+        <div class="ra-wrap">
+          <div class="ra-inner">
+            <div class="ra-highlight"></div>
+            {inputs_html}
+          </div>
+        </div>
+ """ + js_on_load = r""" (() => { const wrap = element.querySelector('.ra-wrap'); @@ -366,63 +589,107 @@ class RadioAnimated(gr.HTML): const highlight = element.querySelector('.ra-highlight'); const inputs = Array.from(element.querySelectorAll('.ra-input')); const labels = Array.from(element.querySelectorAll('.ra-label')); + if (!inputs.length || !labels.length) return; + const choices = inputs.map(i => i.value); - const PAD = 6; + const PAD = 6; // must match .ra-inner padding and .ra-highlight top/left + let currentIdx = 0; + function setHighlightByIndex(idx) { currentIdx = idx; + const lbl = labels[idx]; if (!lbl) return; + const innerRect = inner.getBoundingClientRect(); const lblRect = lbl.getBoundingClientRect(); + + // width matches the label exactly highlight.style.width = `${lblRect.width}px`; + + // highlight has left: 6px, so subtract PAD to align const x = (lblRect.left - innerRect.left - PAD); highlight.style.transform = `translateX(${x}px)`; } + function setCheckedByValue(val, shouldTrigger=false) { const idx = Math.max(0, choices.indexOf(val)); inputs.forEach((inp, i) => { inp.checked = (i === idx); }); + + // Wait a frame in case fonts/layout settle (prevents rare drift) requestAnimationFrame(() => setHighlightByIndex(idx)); + props.value = choices[idx]; if (shouldTrigger) trigger('change', props.value); } + + // Init setCheckedByValue(props.value ?? choices[0], false); + + // Input handlers inputs.forEach((inp) => { inp.addEventListener('change', () => setCheckedByValue(inp.value, true)); }); + + // Recalc on resize (important in Gradio layouts) window.addEventListener('resize', () => setHighlightByIndex(currentIdx)); + + // sync from Python (Examples / backend updates) let last = props.value; const syncFromProps = () => { - if (props.value !== last) { last = props.value; setCheckedByValue(last, false); } + if (props.value !== last) { + last = props.value; + setCheckedByValue(last, false); + } requestAnimationFrame(syncFromProps); }; requestAnimationFrame(syncFromProps); + })(); + """ - super().__init__(value=value, html_template=html_template, js_on_load=js_on_load, **kwargs) + + super().__init__( + value=value, + html_template=html_template, + js_on_load=js_on_load, + **kwargs + ) -# --- PART 12: CUSTOM COMPONENT - PromptBox (JS/HTML) --- +#################################################################################################### +### PART 13: Custom Component - PromptBox +#################################################################################################### class PromptBox(gr.HTML): - def __init__(self, value="", placeholder="اینجا بنویسید...", **kwargs): + """ + Prompt textarea with an internal footer slot (.ds-footer) where we can inject dropdowns. + """ + def __init__(self, value="", placeholder="Describe what you want...", **kwargs): uid = uuid.uuid4().hex[:8] + html_template = f"""
+        <textarea class="ds-textarea" placeholder="{placeholder}"></textarea>
+        <div class="ds-footer"></div>
""" + js_on_load = r""" (() => { const textarea = element.querySelector(".ds-textarea"); if (!textarea) return; + const autosize = () => { textarea.style.height = "0px"; textarea.style.height = Math.min(textarea.scrollHeight, 240) + "px"; }; + const setValue = (v, triggerChange=false) => { const val = (v ?? ""); if (textarea.value !== val) textarea.value = val; @@ -430,49 +697,114 @@ class PromptBox(gr.HTML): props.value = textarea.value; if (triggerChange) trigger("change", props.value); }; + setValue(props.value, false); + textarea.addEventListener("input", () => { autosize(); props.value = textarea.value; trigger("change", props.value); }); + + // ✅ Focus-on-load (robust) + const shouldAutoFocus = () => { + // don’t steal focus if user already clicked/typed somewhere + const ae = document.activeElement; + if (ae && ae !== document.body && ae !== document.documentElement) return false; + // don’t auto-focus on small screens (optional; avoids mobile keyboard pop) + if (window.matchMedia && window.matchMedia("(max-width: 768px)").matches) return false; + return true; + }; + + const focusWithRetry = (tries = 30) => { + if (!shouldAutoFocus()) return; + // only focus if still not focused + if (document.activeElement !== textarea) textarea.focus({ preventScroll: true }); + if (document.activeElement === textarea) return; + if (tries > 0) requestAnimationFrame(() => focusWithRetry(tries - 1)); + }; + + // wait a tick so Gradio/layout settles + requestAnimationFrame(() => focusWithRetry()); + + // keep your sync loop let last = props.value; const syncFromProps = () => { - if (props.value !== last) { last = props.value; setValue(last, false); } + if (props.value !== last) { + last = props.value; + setValue(last, false); + } requestAnimationFrame(syncFromProps); }; requestAnimationFrame(syncFromProps); })(); + """ + super().__init__(value=value, html_template=html_template, js_on_load=js_on_load, **kwargs) -# --- PART 13: CUSTOM COMPONENT - CameraDropdown (JS/HTML) --- +#################################################################################################### +### PART 14: Custom Component - CameraDropdown +#################################################################################################### class CameraDropdown(gr.HTML): + """ + Custom dropdown (More-style) with optional icons per item. + Outputs: selected option string, e.g. "16:9" + """ def __init__(self, choices, value="None", title="Dropdown", **kwargs): + if not choices: + raise ValueError("CameraDropdown requires choices.") + + # Normalize choices -> list of dicts: {label, value, icon(optional)} norm = [] for c in choices: if isinstance(c, dict): - norm.append({"label": str(c.get("label")), "value": str(c.get("value")), "icon": c.get("icon")}) + label = str(c.get("label", c.get("value", ""))) + val = str(c.get("value", label)) + icon = c.get("icon", None) # emoji or svg/html + norm.append({"label": label, "value": val, "icon": icon}) else: s = str(c) norm.append({"label": s, "value": s, "icon": None}) + uid = uuid.uuid4().hex[:8] - + def render_item(item): - icon_html = f'{item["icon"]}' if item["icon"] else "" - return f'' - + icon_html = "" + if item["icon"]: + icon_html = f'{item["icon"]}' + return ( + f'' + ) + items_html = "\n".join(render_item(item) for item in norm) + html_template = f"""
- -
{title}
{items_html}
+ + +
""" + + # Pass a mapping value->label so the trigger can show label text + # (and still output value to Python) value_to_label = {it["value"]: it["label"] for it in norm} value_to_icon = {it["value"]: (it["icon"] or "") for it in norm} - + js_on_load = r""" (() => { const wrap = element.querySelector(".cd-wrap"); @@ -481,99 +813,239 @@ class CameraDropdown(gr.HTML): const triggerText = element.querySelector(".cd-trigger-text"); const menu = element.querySelector(".cd-menu"); const items = Array.from(element.querySelectorAll(".cd-item")); - if (!wrap || !trigger) return; + if (!wrap || !trigger || !menu || !items.length) return; + const valueToLabel = __VALUE_TO_LABEL__; const valueToIcon = __VALUE_TO_ICON__; + const safeLabel = (v) => (valueToLabel && valueToLabel[v]) ? valueToLabel[v] : (v ?? "None"); const safeIcon = (v) => (valueToIcon && valueToIcon[v]) ? valueToIcon[v] : ""; - function closeMenu() { menu.classList.remove("open"); } - function openMenu() { menu.classList.add("open"); } + + + function closeMenu() { + menu.classList.remove("open"); + trigger.setAttribute("aria-expanded", "false"); + menu.setAttribute("aria-hidden", "true"); + } + + function openMenu() { + menu.classList.add("open"); + trigger.setAttribute("aria-expanded", "true"); + menu.setAttribute("aria-hidden", "false"); + } + function setValue(val, shouldTrigger = false) { const v = (val ?? "None"); props.value = v; + + // Trigger shows LABEL only (icons stay in menu) triggerText.textContent = safeLabel(v); if (triggerIcon) { triggerIcon.innerHTML = safeIcon(v); triggerIcon.style.display = safeIcon(v) ? "inline-flex" : "none"; } - items.forEach(btn => { btn.dataset.selected = (btn.dataset.value === v) ? "true" : "false"; }); + + + items.forEach(btn => { + btn.dataset.selected = (btn.dataset.value === v) ? "true" : "false"; + }); + if (shouldTrigger) trigger("change", props.value); } - trigger.addEventListener("pointerdown", (e) => { e.preventDefault(); e.stopPropagation(); if (menu.classList.contains("open")) closeMenu(); else openMenu(); }); - document.addEventListener("pointerdown", (e) => { if (!wrap.contains(e.target)) closeMenu(); }, true); + + trigger.addEventListener("pointerdown", (e) => { + e.preventDefault(); + e.stopPropagation(); + if (menu.classList.contains("open")) closeMenu(); + else openMenu(); + }); + + document.addEventListener("pointerdown", (e) => { + if (!wrap.contains(e.target)) closeMenu(); + }, true); + + document.addEventListener("keydown", (e) => { + if (e.key === "Escape") closeMenu(); + }); + + wrap.addEventListener("focusout", (e) => { + if (!wrap.contains(e.relatedTarget)) closeMenu(); + }); + items.forEach((btn) => { btn.addEventListener("pointerdown", (e) => { - e.preventDefault(); e.stopPropagation(); closeMenu(); setValue(btn.dataset.value, true); + e.preventDefault(); + e.stopPropagation(); + closeMenu(); + setValue(btn.dataset.value, true); }); }); + + // init setValue((props.value ?? 
"None"), false); + + // sync from Python let last = props.value; - const syncFromProps = () => { if (props.value !== last) { last = props.value; setValue(last, false); } requestAnimationFrame(syncFromProps); }; + const syncFromProps = () => { + if (props.value !== last) { + last = props.value; + setValue(last, false); + } + requestAnimationFrame(syncFromProps); + }; requestAnimationFrame(syncFromProps); })(); """ - js_on_load = js_on_load.replace("__VALUE_TO_LABEL__", json.dumps(value_to_label)).replace("__VALUE_TO_ICON__", json.dumps(value_to_icon)) - super().__init__(value=value, html_template=html_template, js_on_load=js_on_load, **kwargs) + # Inject mapping into JS safely + import json + js_on_load = js_on_load.replace("__VALUE_TO_LABEL__", json.dumps(value_to_label)) + js_on_load = js_on_load.replace("__VALUE_TO_ICON__", json.dumps(value_to_icon)) -# --- PART 14: CUSTOM COMPONENT - PresetGallery (JS/HTML) --- + super().__init__( + value=value, + html_template=html_template, + js_on_load=js_on_load, + **kwargs + ) + + +#################################################################################################### +### PART 15: Custom Component - PresetGallery +#################################################################################################### class PresetGallery(gr.HTML): - def __init__(self, items, title="Click Example", **kwargs): + """ + Clickable image presets -> outputs selected index (int as string, then cast in python) + """ + def __init__(self, items, title="Click an Example", **kwargs): + """ + items: list[dict] with keys: + - thumb: str (path to image file, e.g. "supergirl.png") + - label: str (optional) + """ uid = uuid.uuid4().hex[:8] + cards_html = [] for i, it in enumerate(items): thumb = it["thumb"] label = it.get("label", "") - cards_html.append(f'') - html_template = f'
{title}
{"".join(cards_html)}
'
-
+            # NOTE: if these are repo files, <img src="{thumb}"> usually works on HF Spaces.
+            # If not, see note at the end for /file= usage.
+            cards_html.append(f"""
+            <button class="pg-card" data-idx="{i}" type="button">
+              <img class="pg-thumb" src="{thumb}" alt="{label}" draggable="false">
+            </button>
+            """)
+
+        html_template = f"""
+        <div class="pg-wrap">
+          <div class="pg-head">
+            <div class="pg-title">Examples</div>
+          </div>
+          <div class="pg-grid">
+            {''.join(cards_html)}
+          </div>
+        </div>
+ """ + js_on_load = r""" (() => { + const wrap = element.querySelector(".pg-wrap"); const cards = Array.from(element.querySelectorAll(".pg-card")); + if (!wrap || !cards.length) return; + function setDim(activeIdx) { cards.forEach((c, i) => { c.dataset.dim = (activeIdx !== null && i !== activeIdx) ? "true" : "false"; c.dataset.active = (i === activeIdx) ? "true" : "false"; }); } - let lastSent = null; let lock = false; + + // prevent accidental double-fire (e.g. touch -> click) + let lastSent = null; + let lock = false; + cards.forEach((card) => { - card.addEventListener("pointerenter", () => setDim(Number(card.dataset.idx))); - card.addEventListener("pointerleave", () => setDim(null)); + card.addEventListener("pointerenter", () => { + setDim(Number(card.dataset.idx)); + }); + + card.addEventListener("pointerleave", () => { + setDim(null); + }); + + // Use pointerdown and suppress everything else card.addEventListener("pointerdown", (e) => { - e.preventDefault(); e.stopPropagation(); - if (lock) return; lock = true; setTimeout(() => (lock = false), 250); + e.preventDefault(); + e.stopPropagation(); + + if (lock) return; + lock = true; + setTimeout(() => (lock = false), 250); + const idx = String(card.dataset.idx); - if (idx === lastSent) return; lastSent = idx; + + // Only update if changed (prevents Gradio from emitting again) + if (idx === lastSent) return; + lastSent = idx; + + // ✅ Only set props.value — DO NOT trigger('change') props.value = idx; }, { passive: false }); }); + setDim(null); })(); """ + + super().__init__(value="", html_template=html_template, js_on_load=js_on_load, **kwargs) -# --- PART 15: CUSTOM COMPONENT - AudioDropUpload (JS/HTML) --- +#################################################################################################### +### PART 16: Custom Component - AudioDropUpload +#################################################################################################### class AudioDropUpload(gr.HTML): + """ + Custom audio drop/click UI that proxies file into a hidden gr.Audio component. + IMPORTANT: + - Pass target_audio_elem_id = elem_id of your hidden gr.Audio. + - The hidden gr.Audio must be type="filepath" (or whatever you need). + """ def __init__(self, target_audio_elem_id: str, value=None, **kwargs): uid = uuid.uuid4().hex[:8] + html_template = f"""
-        <div class="aud-wrap">
-          <div class="aud-drop" tabindex="0">
-            <div class="aud-drop-text">(اختیاری) فایل صوتی را اینجا رها کنید</div>
-            <div class="aud-drop-hint">...یا کلیک کنید برای انتخاب فایل</div>
-          </div>
+        <div class="aud-wrap">
+          <div class="aud-drop" tabindex="0">
+            <div class="aud-drop-text">(Optional) Drag & drop an audio file here</div>
+            <div class="aud-drop-hint">…or click to browse</div>
+          </div>
+          <div class="aud-row" style="display:none">
+            <audio class="aud-player" controls></audio>
+            <button class="aud-remove" type="button" title="Remove">✕</button>
+          </div>
+          <div class="aud-label" style="display:none"></div>
+        </div>
""" + + # JS: + # - finds the hidden gr.Audio upload inside the component with elem_id=target_audio_elem_id + # - sets the selected file onto it (DataTransfer) and dispatches change js_on_load = """ (() => {{ - function grRoot() {{ const ga = document.querySelector("gradio-app"); return (ga && ga.shadowRoot) ? ga.shadowRoot : document; }} + // Helper: access Gradio shadow DOM safely + function grRoot() {{ + const ga = document.querySelector("gradio-app"); + return (ga && ga.shadowRoot) ? ga.shadowRoot : document; + }} const root = grRoot(); const wrap = element.querySelector(".aud-wrap"); const drop = element.querySelector(".aud-drop"); @@ -586,376 +1058,1625 @@ class AudioDropUpload(gr.HTML): function findHiddenAudioFileInput() {{ const host = root.querySelector("#" + CSS.escape(TARGET_ID)); if (!host) return null; - return host.querySelector('input[type="file"]'); + // Gradio's Audio component contains an for upload. + // This selector works in most Gradio 3/4 themes. + const inp = host.querySelector('input[type="file"]'); + return inp; + }} + function showDrop() {{ + drop.style.display = ""; + row.style.display = "none"; + label.style.display = "none"; + label.textContent = ""; + }} + function showPlayer(filename) {{ + drop.style.display = "none"; + row.style.display = "flex"; + if (filename) {{ + label.textContent = "Loaded: " + filename; + label.style.display = "block"; + }} }} - function showDrop() {{ drop.style.display = ""; row.style.display = "none"; label.style.display = "none"; }} - function showPlayer(filename) {{ drop.style.display = "none"; row.style.display = "flex"; if (filename) {{ label.textContent = "انتخاب شده: " + filename; label.style.display = "block"; }} }} function clearPreview() {{ - player.pause(); player.removeAttribute("src"); player.load(); - if (currentUrl) {{ URL.revokeObjectURL(currentUrl); currentUrl = null; }} + player.pause(); + player.removeAttribute("src"); + player.load(); + if (currentUrl) {{ + URL.revokeObjectURL(currentUrl); + currentUrl = null; + }} + }} + function clearHiddenGradioAudio() {{ + const fileInput = findHiddenAudioFileInput(); + if (!fileInput) return; + // Clear file input (works by replacing its files with empty DataTransfer) + fileInput.value = ""; + const dt = new DataTransfer(); + fileInput.files = dt.files; + fileInput.dispatchEvent(new Event("input", { bubbles: true })); + fileInput.dispatchEvent(new Event("change", { bubbles: true })); }} function clearAll() { clearPreview(); - const fileInput = findHiddenAudioFileInput(); - if (fileInput) { fileInput.value = ""; fileInput.dispatchEvent(new Event("input", { bubbles: true })); fileInput.dispatchEvent(new Event("change", { bubbles: true })); } - props.value = "__CLEAR__"; trigger("change", props.value); showDrop(); + + // Attempt DOM clear (still useful) + clearHiddenGradioAudio(); + + // Tell Gradio/Python explicitly to clear backend state + props.value = "__CLEAR__"; + trigger("change", props.value); + + showDrop(); } + function loadFileToPreview(file) {{ - if (!file || !file.type.startsWith("audio/")) {{ alert("لطفا یک فایل صوتی انتخاب کنید."); return; }} - clearPreview(); currentUrl = URL.createObjectURL(file); player.src = currentUrl; showPlayer(file.name); - const fileInput = findHiddenAudioFileInput(); - if (fileInput) {{ - const dt = new DataTransfer(); dt.items.add(file); fileInput.files = dt.files; - fileInput.dispatchEvent(new Event("input", {{ bubbles: true }})); fileInput.dispatchEvent(new Event("change", {{ bubbles: true }})); + if (!file) return; + if (!file.type 
|| !file.type.startsWith("audio/")) {{ + alert("Please choose an audio file."); + return; }} + clearPreview(); + currentUrl = URL.createObjectURL(file); + player.src = currentUrl; + showPlayer(file.name); + }} + function pushFileIntoHiddenGradioAudio(file) { + const fileInput = findHiddenAudioFileInput(); + if (!fileInput) { + console.warn("Could not find hidden gr.File input. Check elem_id:", TARGET_ID); + return; + } + + // Hard reset (important for re-selecting same file) + fileInput.value = ""; + + const dt = new DataTransfer(); + dt.items.add(file); + fileInput.files = dt.files; + + // Trigger Gradio listeners + fileInput.dispatchEvent(new Event("input", { bubbles: true })); + fileInput.dispatchEvent(new Event("change", { bubbles: true })); + } + + function handleFile(file) {{ + loadFileToPreview(file); + pushFileIntoHiddenGradioAudio(file); + }} + // Click-to-browse uses a *local* ephemeral input (not Gradio’s), + // then we forward to hidden gr.Audio. const localPicker = document.createElement("input"); - localPicker.type = "file"; localPicker.accept = "audio/*"; localPicker.style.display = "none"; + localPicker.type = "file"; + localPicker.accept = "audio/*"; + localPicker.style.display = "none"; wrap.appendChild(localPicker); - localPicker.addEventListener("change", () => {{ const f = localPicker.files[0]; if (f) loadFileToPreview(f); localPicker.value = ""; }}); + localPicker.addEventListener("change", () => {{ + const f = localPicker.files && localPicker.files[0]; + if (f) handleFile(f); + localPicker.value = ""; + }}); drop.addEventListener("click", () => localPicker.click()); + drop.addEventListener("keydown", (e) => {{ + if (e.key === "Enter" || e.key === " ") {{ + e.preventDefault(); + localPicker.click(); + }} + }}); removeBtn.addEventListener("click", clearAll); - drop.addEventListener("dragover", (e) => {{ e.preventDefault(); drop.classList.add("dragover"); }}); + // Drag & drop + ["dragenter","dragover","dragleave","drop"].forEach(evt => {{ + drop.addEventListener(evt, (e) => {{ + e.preventDefault(); + e.stopPropagation(); + }}); + }}); + drop.addEventListener("dragover", () => drop.classList.add("dragover")); drop.addEventListener("dragleave", () => drop.classList.remove("dragover")); - drop.addEventListener("drop", (e) => {{ e.preventDefault(); drop.classList.remove("dragover"); const f = e.dataTransfer.files[0]; if (f) loadFileToPreview(f); }}); + drop.addEventListener("drop", (e) => {{ + drop.classList.remove("dragover"); + const f = e.dataTransfer.files && e.dataTransfer.files[0]; + if (f) handleFile(f); + }}); + // init showDrop(); + function setPreviewFromPath(path) { if (path === "__CLEAR__") path = null; - if (!path) { clearPreview(); showDrop(); return; } + + if (!path) { + clearPreview(); + showDrop(); + return; + } + + // If path already looks like a URL, use it directly + // otherwise serve it through Gradio's file route. 
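+                // e.g. "https://host/a.wav" is used as-is, while "/tmp/a.wav" becomes
+                // "gradio_api/file=/tmp/a.wav" (the check below mirrors this rule).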
let url = path; - if (!/^https?:\\/\\//.test(path) && !path.startsWith("gradio_api/file=") && !path.startsWith("/file=")) url = "gradio_api/file=" + path; - clearPreview(); player.src = url; showPlayer(path.split("/").pop()); + if (!/^https?:\/\//.test(path) && !path.startsWith("gradio_api/file=") && !path.startsWith("/file=")) { + url = "gradio_api/file=" + path; + } + + clearPreview(); + player.src = url; + showPlayer(path.split("/").pop()); } + + // ---- sync from Python (Examples / backend updates) ---- let last = props.value; - const syncFromProps = () => { const v = props.value; if (v !== last) { last = v; if (!v || v === "__CLEAR__") setPreviewFromPath(null); else setPreviewFromPath(String(v)); } requestAnimationFrame(syncFromProps); }; + const syncFromProps = () => { + const v = props.value; + + if (v !== last) { + last = v; + if (!v || v === "__CLEAR__") setPreviewFromPath(null); + else setPreviewFromPath(String(v)); + } + requestAnimationFrame(syncFromProps); + }; requestAnimationFrame(syncFromProps); + + }})(); """ js_on_load = js_on_load.replace("__TARGET_ID__", target_audio_elem_id) - super().__init__(value=value, html_template=html_template, js_on_load=js_on_load, **kwargs) + + super().__init__( + value=value, + html_template=html_template, + js_on_load=js_on_load, + **kwargs + ) + +#################################################################################################### +### PART 17: Wrapper Functions (Resolution, Duration, Examples) +#################################################################################################### +def generate_video_example(first_frame, prompt, camera_lora, resolution, radioanimated_mode, input_audio, end_frame, progress=gr.Progress(track_tqdm=True)): -# --- PART 16: MAIN GENERATION LOGIC (FUNCTIONS) --- -def get_duration(first_frame, end_frame, prompt, duration, input_video, radioanimated_mode, enhance_prompt, seed, randomize_seed, height, width, camera_lora, audio_path, progress): + w, h = apply_resolution(resolution) + + # We pass None for input_video in example generation since Motion Control is removed + duration_s = 5 # Default duration for examples + + with timer(f'generating with LoRA:{camera_lora} in {w}x{h}'): + output_video = generate_video( + first_frame, + end_frame, + prompt, + 10, + None, # input_video + radioanimated_mode, + True, + 42, + True, + h, + w, + camera_lora, + input_audio, + progress + ) + return output_video + +def get_duration( + first_frame, + end_frame, + prompt, + duration, + input_video, + radioanimated_mode, + enhance_prompt, + seed, + randomize_seed, + height, + width, + camera_lora, + audio_path, + progress +): extra_time = 0 - if audio_path is not None: extra_time += 10 - if input_video is not None: extra_time += 60 - if duration <= 3: return 60 + extra_time - elif duration <= 5: return 80 + extra_time - elif duration <= 10: return 120 + extra_time - else: return 180 + extra_time + if audio_path is not None: + extra_time += 10 + + if input_video is not None: + extra_time += 60 + + if duration <= 3: + return 60 + extra_time + elif duration <= 5: + return 80 + extra_time + elif duration <= 10: + return 120 + extra_time + else: + return 180 + extra_time + + +#################################################################################################### +### PART 18: Main Generation Function (Modified) +#################################################################################################### @spaces.GPU(duration=get_duration) -def generate_video(first_frame, end_frame, prompt, 
duration, input_video=None, generation_mode="تصویر به ویدیو",
-                   enhance_prompt=True, seed=42, randomize_seed=True, height=DEFAULT_1_STAGE_HEIGHT, width=DEFAULT_1_STAGE_WIDTH,
-                   camera_lora="هیچکدام", audio_path=None, progress=gr.Progress(track_tqdm=True)):
-
-    # نگاشت نام‌های فارسی به منطق انگلیسی
-    if (camera_lora != "هیچکدام" or audio_path is not None) and duration == 15:
-        gr.Info("۱۵ ثانیه هنگام استفاده از LoRA یا لیپ‌سینک در دسترس نیست، به ۱۰ ثانیه کاهش یافت.")
+def generate_video(
+    first_frame,
+    end_frame,
+    prompt: str,
+    duration: float,
+    input_video=None,
+    generation_mode="Image-to-Video",
+    enhance_prompt: bool = True,
+    seed: int = 42,
+    randomize_seed: bool = True,
+    height: int = DEFAULT_1_STAGE_HEIGHT,
+    width: int = DEFAULT_1_STAGE_WIDTH,
+    camera_lora: str = "No LoRA",
+    audio_path=None,
+    progress=gr.Progress(track_tqdm=True),
+):
+    """
+    Generate a short cinematic video from a text prompt and an optional input image using the LTX-2 distilled pipeline.
+    """
+
+    if (camera_lora != "No LoRA" or audio_path is not None) and duration == 15:
+        gr.Info("15s is not available when a LoRA or lipsync is active; reducing to 10s for this generation")
         duration = 10
+    if audio_path is None:
+        print(f'generating with duration:{duration} and LoRA:{camera_lora} in {width}x{height}')
+    else:
+        print(f'generating with duration:{duration} and audio in {width}x{height}')
+
+    # Randomize seed if checkbox is enabled
     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
+
+    # Calculate num_frames from duration (using fixed 24 fps)
     frame_rate = 24.0
-    num_frames = int(duration * frame_rate) + 1
+    num_frames = int(duration * frame_rate) + 1  # +1 to ensure we meet the duration
+    video_seconds = int(duration)
+
     with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
         output_path = tmpfile.name
+
     images = []
     videos = []
-    if generation_mode == "کنترل حرکت":
-        if input_video is not None:
-            cond_mp4, first_png, _ = prepare_conditioning_video_mp4_no_pad(input_video, num_frames, frame_rate)
-            if first_frame is None: images = [(first_png, 0, 1.0)]
-            if audio_path is None:
-                src_video_path = _coerce_video_path(input_video)
-                extracted_audio_tmp = extract_audio_wav_ffmpeg(src_video_path)
-                if extracted_audio_tmp is not None: audio_path = extracted_audio_tmp
-
-            with timer("پیش‌پردازش ویدیو برای ژست..."):
-                cond_path = preprocess_video_to_pose_mp4(cond_mp4, width, height, frame_rate)
-            videos = [(cond_path, 1.0)]
-            camera_lora = "ژست (Pose)"
+    # Removed Motion Control block
     if first_frame is not None:
+        images = []
         images.append((first_frame, 0, 1.0))
-        if generation_mode == "درون‌یابی" and end_frame is not None:
-            end_idx = max(0, num_frames - 1)
-            images.append((end_frame, end_idx, 0.5))
-
-    embeddings, _, _ = encode_prompt(prompt, enhance_prompt, first_frame, current_seed)
+        if generation_mode == "Interpolate":
+            if end_frame is not None:
+                end_idx = max(0, num_frames - 1)
+                images.append((end_frame, end_idx, 0.5))
+
+    embeddings, final_prompt, status = encode_prompt(
+        prompt=prompt,
+        enhance_prompt=enhance_prompt,
+        input_image=first_frame,
+        seed=current_seed,
+        negative_prompt="",
+    )
+
     video_context = embeddings["video_context"].to("cuda", non_blocking=True)
     audio_context = embeddings["audio_context"].to("cuda", non_blocking=True)
+    print("✓ Embeddings loaded successfully")
+
+    # free prompt enhancer / encoder temps ASAP
+    del embeddings, final_prompt, status
+    torch.cuda.empty_cache()
+
+    # ✅ if user provided audio, use a neutral audio_context
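+    # (Encoding the empty prompt "" is assumed to yield a neutral, unconditioned audio
+    # context, so the user-provided waveform rather than the text drives the audio branch.)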
+
     with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
         output_path = tmpfile.name
+
+    images = []
     videos = []
-    if generation_mode == "Motion Control":
-        if input_video is not None:
-            cond_mp4, first_png, _ = prepare_conditioning_video_mp4_no_pad(input_video, num_frames, frame_rate)
-            if first_frame is None: images = [(first_png, 0, 1.0)]
-            if audio_path is None:
-                src_video_path = _coerce_video_path(input_video)
-                extracted_audio_tmp = extract_audio_wav_ffmpeg(src_video_path)
-                if extracted_audio_tmp is not None: audio_path = extracted_audio_tmp
-
-            with timer("Preprocessing video for pose..."):
-                cond_path = preprocess_video_to_pose_mp4(cond_mp4, width, height, frame_rate)
-            videos = [(cond_path, 1.0)]
-            camera_lora = "Pose"
+    # Removed Motion Control block
     if first_frame is not None:
         images.append((first_frame, 0, 1.0))
-    if generation_mode == "Interpolate" and end_frame is not None:
-        end_idx = max(0, num_frames - 1)
-        images.append((end_frame, end_idx, 0.5))
+    if generation_mode == "Interpolate":
+        if end_frame is not None:
+            end_idx = max(0, num_frames - 1)
+            images.append((end_frame, end_idx, 0.5))

-    embeddings, _, _ = encode_prompt(prompt, enhance_prompt, first_frame, current_seed)
+    embeddings, final_prompt, status = encode_prompt(
+        prompt=prompt,
+        enhance_prompt=enhance_prompt,
+        input_image=first_frame,
+        seed=current_seed,
+        negative_prompt="",
+    )
+
     video_context = embeddings["video_context"].to("cuda", non_blocking=True)
     audio_context = embeddings["audio_context"].to("cuda", non_blocking=True)
+    print("✓ Embeddings loaded successfully")
+
+    # free prompt enhancer / encoder temps ASAP
+    del embeddings, final_prompt, status
+    torch.cuda.empty_cache()
+
+    # ✅ if user provided audio, use a neutral (empty-text) audio_context
     n_audio_context = None
     if audio_path is not None:
         with torch.inference_mode():
-            _, n_audio_context = encode_text_simple(text_encoder, "")
+            _, n_audio_context = encode_text_simple(text_encoder, "")  # returns tensors on GPU already
+        del audio_context
         audio_context = n_audio_context
-    if len(videos) == 0: camera_lora = "Static"
-
-    # Find the LoRA index from the dropdown name
+
+    if len(videos) == 0:
+        camera_lora = "Static"
+
+    torch.cuda.empty_cache()
+
+    # Map dropdown name -> adapter index
     name_to_idx = {name: idx for name, idx in RUNTIME_LORA_CHOICES}
     selected_idx = name_to_idx.get(camera_lora, -1)
+
     enable_only_lora(pipeline._transformer, selected_idx)
     torch.cuda.empty_cache()

-    input_waveform = None
-    input_waveform_sample_rate = None
+    # True video duration in seconds based on the rounding above
+    video_seconds = (num_frames - 1) / frame_rate
+
     if audio_path is not None:
-        video_seconds = (num_frames - 1) / frame_rate
-        input_waveform, input_waveform_sample_rate = match_audio_to_duration(audio_path, video_seconds)
-
-    with torch.inference_mode():
-        pipeline(
-            prompt=prompt, output_path=str(output_path), seed=current_seed,
-            height=height, width=width, num_frames=num_frames, frame_rate=frame_rate,
-            images=images, video_conditioning=videos, tiling_config=TilingConfig.default(),
-            video_context=video_context, audio_context=audio_context,
-            input_waveform=input_waveform, input_waveform_sample_rate=input_waveform_sample_rate,
+        input_waveform, input_waveform_sample_rate = match_audio_to_duration(
+            audio_path=audio_path,
+            target_seconds=video_seconds,
+            target_sr=48000,     # pick what your model expects; 48k is common for AV models
+            to_mono=True,        # set False if your model wants stereo
+            pad_mode="silence",  # or "repeat" if you prefer looping over silence
+            device="cuda",
         )
-    return str(output_path)
+    else:
+        input_waveform = None
+        input_waveform_sample_rate = None
+
+    with timer(f'generating with LoRA:{camera_lora} in {width}x{height}'):
+        with torch.inference_mode():
+            pipeline(
+                prompt=prompt,
+                output_path=str(output_path),
+                seed=current_seed,
+                height=height,
+                width=width,
+                num_frames=num_frames,
+                frame_rate=frame_rate,
+                images=images,
+                video_conditioning=videos,
+                tiling_config=TilingConfig.default(),
+                video_context=video_context,
+                audio_context=audio_context,
+                input_waveform=input_waveform,
+                input_waveform_sample_rate=input_waveform_sample_rate,
+            )
+    del video_context, audio_context
+    torch.cuda.empty_cache()
+    print("successful generation")
+    return str(output_path)

-# --- PART 17: HELPER LOGIC (RESOLUTION/DURATION) ---
-def generate_video_example(first_frame, prompt, camera_lora, resolution, radioanimated_mode, input_video, input_audio, end_frame):
-    w, h = apply_resolution(resolution)
-    return generate_video(first_frame, end_frame, prompt, 10, input_video, radioanimated_mode, True, 42, True, h, w, camera_lora, input_audio)

 def apply_resolution(resolution: str):
-    if resolution == "16:9": w, h = 768, 512
-    elif resolution == "1:1": w, h = 512, 512
-    elif resolution == "9:16": w, h = 512, 768
-    else: w, h = 768, 512
+
+    if resolution == "16:9":
+        w, h = 768, 512
+    elif resolution == "1:1":
+        w, h = 512, 512
+    elif resolution == "9:16":
+        w, h = 512, 768
+    else:
+        # Fall back to 16:9 so w/h are always bound (the old one-liner had this fallback too)
+        w, h = 768, 512
+
     return int(w), int(h)

 def apply_duration(duration: str):
-    return int(duration[:-1])
+    duration_s = int(duration[:-1])
+    return duration_s

 def on_mode_change(selected: str):
-    is_motion = (selected == "Motion Control")
-    is_interpolate = (selected == "Interpolate")
+    is_motion = (selected == "Motion Control")  # always False now that Motion Control is removed
+    is_interpolate = (selected == "Interpolate")
+
     return (gr.update(visible=is_motion), gr.update(visible=is_interpolate))
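+# Quick sanity check for the helpers above, e.g.:
+#   apply_duration("10s")             -> 10
+#   apply_resolution("9:16")          -> (512, 768)
+#   on_mode_change("Interpolate")     -> (hide motion input, show end-frame input)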
-# --- PART 18: CSS STYLING (MOBILE & RTL ADJUSTMENTS) ---
+####################################################################################################
+### PART 19: CSS Styles
+####################################################################################################
 css = """
-    /* RTL & Font Settings */
-    body, .gradio-container, .prose, input, textarea, button, label {
-        direction: rtl;
-        text-align: right;
-        font-family: 'Tahoma', 'Arial', sans-serif !important;
-    }
-    .ds-textarea { text-align: right !important; direction: rtl !important; }
-
-    /* Center Modes */
-    #mode-row { display: flex !important; justify-content: center !important; width: 100% !important; margin-bottom: 10px; }
-    #radioanimated_mode { display: inline-flex !important; justify-content: center !important; }

-    /* Controls Row */
+    /* Hidden: the row's children are relocated into the prompt footer by JS */
     #controls-row {
-        display: flex;
-        align-items: center;
-        gap: 12px;
-        flex-wrap: wrap; /* Allow wrapping on mobile */
-        justify-content: center;
-    }
-
-    /* Mobile Responsive Layout */
-    @media (max-width: 768px) {
-        #controls-row { flex-direction: column; width: 100%; }
-        #controls-row > * { width: 100% !important; max-width: 100% !important; }
-        .ds-footer { position: relative !important; bottom: 0 !important; right: 0 !important; justify-content: center !important; flex-wrap: wrap; margin-top: 10px; }
-        .ds-textarea { padding-bottom: 10px !important; }
-        .cd-menu { width: 100% !important; left: 0 !important; right: 0 !important; }
-        .ra-inner { flex-direction: column; width: 100%; }
-        .ra-highlight { display: none; } /* Disable highlight animation on stack mode for simplicity */
-        .ra-label { width: 100%; text-align: center; border-radius: 8px; margin: 2px 0; }
-        .ra-input:checked + .ra-label { background: #8bff97; color: black; }
-        #col-container { padding: 5px; }
-    }
-
-    /* General Styles */
-    #col-container { margin: 0 auto; max-width: 1600px; }
-    #step-column { padding: 15px; border-radius: 12px; box-shadow: 0 4px 6px rgba(0,0,0,0.1); margin: 10px; background: rgba(255,255,255,0.05); }
+        display: none !important;
+        align-items: center;
+        gap: 12px;
+        flex-wrap: nowrap; /* or wrap if you prefer on small screens */
+    }
+
+    /* Stop these components from stretching */
+    #controls-row > * {
+        flex: 0 0 auto !important;
+        width: auto !important;
+        min-width: 0 !important;
+    }
+
+    #col-container {
+        margin: 0 auto;
+        max-width: 1600px;
+    }
+    #modal-container {
+        width: 100vw;   /* Take full viewport width */
+        height: 100vh;  /* Take full viewport height (optional) */
+        display: flex;
+        justify-content: center; /* Center content horizontally */
+        align-items: center;     /* Center content vertically if desired */
+    }
+    #modal-content {
+        width: 100%;
+        max-width: 700px; /* Limit content width */
+        margin: 0 auto;
+        border-radius: 8px;
+        padding: 1.5rem;
+    }
+    #step-column {
+        padding: 10px;
+        border-radius: 8px;
+        box-shadow: var(--card-shadow);
+        margin: 10px;
+    }
+    #col-showcase {
+        margin: 0 auto;
+        max-width: 1100px;
+    }
     .button-gradient {
-        background: linear-gradient(45deg, #FF416C, #FF4B2B);
-        border: none; padding: 14px 28px; font-size: 16px; font-weight: bold; color: white;
-        border-radius: 12px; cursor: pointer; transition: 0.3s; width: 100%;
-    }
-    .button-gradient:hover { transform: scale(1.02); }
-
-    /* Custom Component Styles */
-    .ra-wrap { width: fit-content; margin: 0 auto; }
-    .ra-inner { display: inline-flex; background: #0b0b0b; border-radius: 20px; padding: 5px; position: relative; }
-    .ra-label { padding: 10px 20px; color: rgba(255,255,255,0.7); cursor: pointer; z-index: 2; position: relative; transition: color 0.2s; }
-    .ra-highlight { position: absolute; background: #8bff97; border-radius: 15px; height: calc(100% - 10px); top: 5px; z-index: 1; transition: all 0.2s; }
-    .ra-input:checked + .ra-label { color: black; font-weight: bold; }
-
-    .ds-card { width: 100%; max-width: 100%; }
-    .ds-top { border: 1px solid rgba(255,255,255,0.2); border-radius: 15px; background: #2b2b2b; position: relative; }
-    .ds-textarea { width: 100%; background: transparent; border: none; color: white; padding: 15px; outline: none; resize: none; font-size: 15px; }
-    .ds-footer { display: flex; gap: 10px; position: absolute; left: 10px; bottom: 10px; z-index: 5; } /* Changed right to left for RTL */
-
-    .cd-wrap { position: relative; display: inline-block; }
-    .cd-trigger { background: #0b0b0b; color: white; padding: 10px 20px; border-radius: 20px; border: none; cursor: pointer; display: flex; align-items: center; gap: 8px; }
-    .cd-menu { position: absolute; top: 110%; left: 0; background: #2b2b2b; border: 1px solid #444; border-radius: 10px; padding: 5px; opacity: 0; pointer-events: none; transition: 0.2s; z-index: 100; min-width: 150px; }
-    .cd-menu.open { opacity: 1; pointer-events: auto; }
-    .cd-item { display: flex; width: 100%; padding: 8px; background: transparent; border: none; color: white; text-align: right; cursor: pointer; gap: 8px; }
-    .cd-item:hover { background: rgba(255,255,255,0.1); border-radius: 5px; }
-
-    .pg-grid { display: grid; grid-template-columns: repeat(auto-fill, minmax(100px, 1fr)); gap: 10px; }
-    .pg-img { width: 100%; border-radius: 10px; cursor: pointer; transition: 0.2s; }
-    .pg-img:hover { transform: scale(1.05); }
-
-    .aud-drop { border: 2px dashed #666; padding: 20px; text-align: center; border-radius: 15px; cursor: pointer; color: white; background: rgba(255,255,255,0.05); }
-    .aud-row { display: none; align-items: center; gap: 10px; background: #0b0b0b; padding: 5px 15px; border-radius: 20px; margin-top: 10px; }
-    .aud-player { flex: 1; }
-    .aud-remove { background: none; border: none; color: white; font-size: 18px; cursor: pointer; }
+        background: linear-gradient(45deg, rgb(255, 65, 108), rgb(255, 75, 43), rgb(255, 155, 0), rgb(255, 65, 108)) 0% 0% / 400% 400%;
+        border: none;
+        padding: 14px 28px;
+        font-size: 16px;
+        font-weight: bold;
+        color: white;
+        border-radius: 10px;
+        cursor: pointer;
+        transition: 0.3s ease-in-out;
+        animation: 2s linear 0s infinite normal none running gradientAnimation;
+        box-shadow: rgba(255, 65, 108, 0.6) 0px 4px 10px;
+    }
+    .toggle-container {
+        display: inline-flex;
+        background-color: #ffd6ff; /* light pink background */
+        border-radius: 9999px;
+        padding: 4px;
+        position: relative;
+        width: fit-content;
+        font-family: sans-serif;
+    }
+    .toggle-container input[type="radio"] {
+        display: none;
+    }
+    .toggle-container label {
+        position: relative;
+        z-index: 2;
+        flex: 1;
+        text-align: center;
+        font-weight: 700;
+        color: #4b2ab5; /* dark purple text for unselected */
+        padding: 6px 22px;
+        border-radius: 9999px;
+        cursor: pointer;
+        transition: color 0.25s ease;
+    }
+    /* Moving highlight */
+    .toggle-highlight {
+        position: absolute;
+        top: 4px;
+        left: 4px;
+        width: calc(50% - 4px);
+        height: calc(100% - 8px);
+        background-color: #4b2ab5; /* dark purple background */
+        border-radius: 9999px;
+        transition: transform 0.25s ease;
+        z-index: 1;
+    }
+    /* When "True" is checked */
+    #true:checked ~ label[for="true"] {
+        color: #ffd6ff; /* light pink text */
+    }
+    /* When "False" is checked */
+    #false:checked ~ label[for="false"] {
+        color: #ffd6ff; /* light pink text */
+    }
+    /* Move highlight to right side when False is checked */
+    #false:checked ~ .toggle-highlight {
+        transform: translateX(100%);
+    }
+
+    /* Center the mode row contents */
+    #mode-row {
+        display: flex !important;
+        justify-content: center !important;
+        align-items: center !important;
+        width: 100% !important;
+    }
+
+    /* Stop Gradio from making children stretch */
+    #mode-row > * {
+        flex: 0 0 auto !important;
+        width: auto !important;
+        min-width: 0 !important;
+    }
+
+    /* Specifically ensure the HTML component wrapper doesn't take full width */
+    #mode-row .gr-html,
+    #mode-row .gradio-html,
+    #mode-row .prose,
+    #mode-row .block {
+        width: auto !important;
+        flex: 0 0 auto !important;
+        display: inline-block !important;
+    }
+
+    /* Center the pill itself */
+    #radioanimated_mode {
+        display: inline-flex !important;
+        justify-content: center !important;
+        width: auto !important;
+    }
+    """
+
+css += """
+    .cd-trigger-icon{
+        color: rgba(255,255,255,0.9);
+        display: inline-flex;
+        align-items: center;
+        justify-content: center;
+        width: 18px;
+        height: 18px;
+    }
+    .cd-trigger-icon svg {
+        width: 18px;
+        height: 18px;
+        display: block;
+    }
+    """
+
+css += """
+    /* ---- radioanimated ---- */
+    .ra-wrap{
+        width: fit-content;
+    }
+    .ra-inner{
+        position: relative;
+        display: inline-flex;
+        align-items: center;
+        gap: 0;
+        padding: 6px;
+        background: #0b0b0b;
+        border-radius: 9999px;
+        overflow: hidden;
+        user-select: none;
+    }
+    .ra-input{
+        display: none;
+    }
+    .ra-label{
+        position: relative;
+        z-index: 2;
+        padding: 10px 18px;
+        font-family: ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Arial;
+        font-size: 14px;
+        font-weight: 600;
+        color: rgba(255,255,255,0.7);
+        cursor: pointer;
+        transition: color 180ms ease;
+        white-space: nowrap;
+    }
+    .ra-highlight{
+        position: absolute;
+        z-index: 1;
+        top: 6px;
+        left: 6px;
+        height: calc(100% - 12px);
+        border-radius: 9999px;
+        background: #8bff97; /* green knob */
+        transition: transform 200ms ease, width 200ms ease;
+    }
+    /* selected label becomes darker like your screenshot */
+    .ra-input:checked + .ra-label{
+        color: rgba(0,0,0,0.75);
+    }
+    """
+
+css += """
+.cd-icn svg{
+    width: 18px;
+    height: 18px;
+    display: block;
+}
+.cd-icn svg *{
+    stroke: rgba(255,255,255,0.9);
+}
 """

-# --- PART 19: GRADIO UI LAYOUT ---
-with gr.Blocks(title="LTX-2 Video Maker", css=css) as demo:
-    gr.HTML("""
-        🎥 LTX-2 Distilled: Create videos with AI
-        Write a prompt, provide an image, and get a video back.
+css += """ + /* --- prompt box --- */ + .ds-prompt{ + width: 100%; + max-width: 720px; + margin-top: 3px; + } + + .ds-textarea{ + width: 100%; + box-sizing: border-box; + background: #2b2b2b; + color: rgba(255,255,255,0.9); + border: 1px solid rgba(255,255,255,0.12); + border-radius: 14px; + padding: 14px 16px; + outline: none; + font-family: ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Arial; + font-size: 15px; + line-height: 1.35; + resize: none; + min-height: 210px; + max-height: 210px; + overflow-y: auto; + + /* IMPORTANT: space for the footer controls */ + padding-bottom: 72px; + } + + + .ds-card{ + width: 100%; + max-width: 720px; + margin: 0 auto; + } + .ds-top{ + position: relative; + } + + /* Make room for footer inside textarea */ + .ds-textarea{ + padding-bottom: 72px; + } + + /* Footer positioning */ + .ds-footer{ + position: absolute; + right: 12px; + bottom: 10px; + display: flex; + gap: 8px; + align-items: center; + justify-content: flex-end; + z-index: 3; + } + + /* Smaller pill buttons inside footer */ + .ds-footer .cd-trigger{ + min-height: 32px; + padding: 6px 10px; + font-size: 12px; + gap: 6px; + border-radius: 9999px; + } + .ds-footer .cd-trigger-icon, + .ds-footer .cd-icn{ + width: 14px; + height: 14px; + } + .ds-footer .cd-trigger-icon svg, + .ds-footer .cd-icn svg{ + width: 14px; + height: 14px; + } + .ds-footer .cd-caret{ + font-size: 11px; + } + + /* Bottom safe area bar (optional but looks nicer) */ + .ds-top::after{ + content: ""; + position: absolute; + left: 1px; + right: 1px; + bottom: 1px; + height: 56px; + background: #2b2b2b; + border-bottom-left-radius: 13px; + border-bottom-right-radius: 13px; + pointer-events: none; + z-index: 2; + } + + """ + +css += """ + /* ---- camera dropdown ---- */ + + /* 1) Fix overlap: make the Gradio HTML block shrink-to-fit when it contains a CameraDropdown. + Gradio uses .gr-html for HTML components in most versions; older themes sometimes use .gradio-html. + This keeps your big header HTML unaffected because it doesn't contain .cd-wrap. 
+
+    /* 2) Actual dropdown layout */
+    .cd-wrap{
+        position: relative;
+        display: inline-block;
+    }
+
+    /* 3) Match RadioAnimated pill size/feel */
+    .cd-trigger{
+        margin-top: 2px;
+        display: inline-flex;
+        align-items: center;
+        justify-content: center;
+        gap: 10px;
+
+        border: none;
+
+        box-sizing: border-box;
+        padding: 10px 18px;
+        min-height: 52px;
+        line-height: 1.2;
+
+        border-radius: 9999px;
+        background: #0b0b0b;
+
+        font-family: ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Arial;
+        font-size: 14px;
+
+        /* ✅ match .ra-label exactly */
+        color: rgba(255,255,255,0.7) !important;
+        font-weight: 600 !important;
+
+        cursor: pointer;
+        user-select: none;
+        white-space: nowrap;
+    }
+
+    /* Ensure inner spans match too */
+    .cd-trigger .cd-trigger-text,
+    .cd-trigger .cd-caret{
+        color: rgba(255,255,255,0.7) !important;
+    }
+
+    /* keep caret styling */
+    .cd-caret{
+        opacity: 0.8;
+        font-weight: 900;
+    }
+
+    /* 4) Ensure menu overlays neighbors and isn't clipped */
+    /* Move dropdown a tiny bit up (closer to the trigger) */
+    .cd-menu{
+        position: absolute;
+        top: calc(100% + 4px); /* was +10px */
+        left: 0;
+
+        min-width: 240px;
+        background: #2b2b2b;
+        border: 1px solid rgba(255,255,255,0.14);
+        border-radius: 14px;
+        box-shadow: 0 18px 40px rgba(0,0,0,0.35);
+        padding: 10px;
+
+        opacity: 0;
+        transform: translateY(-6px);
+        pointer-events: none;
+        transition: opacity 160ms ease, transform 160ms ease;
+
+        z-index: 9999;
+    }
+
+    .cd-title{
+        font-size: 12px;
+        font-weight: 600;
+        text-transform: uppercase;
+        letter-spacing: 0.04em;
+
+        color: rgba(255,255,255,0.45); /* muted grey */
+        margin-bottom: 6px;
+        padding: 0 6px;
+        pointer-events: none; /* title is non-interactive */
+    }
+
+    .cd-menu.open{
+        opacity: 1;
+        transform: translateY(0);
+        pointer-events: auto;
+    }
+
+    .cd-items{
+        display: flex;
+        flex-direction: column;
+        gap: 0px; /* tighter, more like a native menu */
+    }
+
+    /* Items: NO "boxed" buttons by default */
+    .cd-item{
+        width: 100%;
+        text-align: left;
+        border: none;
+        background: transparent;      /* removes box look */
+        color: rgba(255,255,255,0.92);
+        padding: 8px 34px 8px 12px;   /* right padding leaves room for tick */
+        border-radius: 10px;          /* only matters on hover */
+        cursor: pointer;
+
+        font-size: 14px;
+        font-weight: 700;
+
+        position: relative;
+        transition: background 120ms ease;
+    }
+
+    /* "Box effect" only on hover (not always) */
+    .cd-item:hover{
+        background: rgba(255,255,255,0.08);
+    }
+
+    /* Tick on the right (hidden by default, shown for the selected item) */
+    .cd-item::after{
+        content: "✓";
+        position: absolute;
+        right: 12px;
+        top: 50%;
+        transform: translateY(-50%);
+        opacity: 0;                 /* hidden by default */
+        transition: opacity 120ms ease;
+        color: rgba(255,255,255,0.9);
+        font-weight: 900;
+    }
+
+    /* show tick ONLY for selected item */
+    .cd-item[data-selected="true"]::after{
+        opacity: 1;
+    }
+
+    /* Kill any old "selected" styling just in case */
+    .cd-item.selected{
+        background: transparent !important;
+        border: none !important;
+    }
+    """
+
+css += """
+/* icons in dropdown items */
+.cd-item{
+    display: flex;
+    align-items: center;
+    gap: 10px;
+}
+.cd-icn{
+    display: inline-flex;
+    align-items: center;
+    justify-content: center;
+    width: 18px;
+    height: 18px;
+    flex: 0 0 18px;
+}
+.cd-label{
+    flex: 1;
+}
+
+/* =========================
+   FIX: prompt border + scrollbar bleed
+   ========================= */
+
+/* Put the border + background on the wrapper, not the textarea */
+.ds-top{
+    position: relative;
+    background: #2b2b2b;
+    border: 1px solid rgba(255,255,255,0.12);
+    border-radius: 14px;
+    overflow: hidden; /* ensures the footer bar is clipped to rounded corners */
+}
+
+/* Make textarea "transparent" so wrapper owns the border/background */
+.ds-textarea{
+    background: transparent !important;
+    border: none !important;
+    border-radius: 0 !important; /* wrapper handles radius */
+    outline: none;
+
+    /* keep your spacing */
+    padding: 14px 16px;
+    padding-bottom: 72px; /* room for footer */
+    width: 100%;
+    box-sizing: border-box;
+
+    /* keep scroll behavior */
+    overflow-y: auto;
+
+    /* prevent scrollbar bleed by hiding native scrollbar */
+    scrollbar-width: none; /* Firefox */
+}
+.ds-textarea::-webkit-scrollbar{ /* Chrome/Safari */
+    width: 0;
+    height: 0;
+}
+
+/* Safe-area bar: now it matches perfectly because it's inside the same bordered wrapper */
+.ds-top::after{
+    content: "";
+    position: absolute;
+    left: 0;
+    right: 0;
+    bottom: 0;
+    height: 56px;
+    background: #2b2b2b;
+    pointer-events: none;
+    z-index: 2;
+}
+
+/* Footer above the bar */
+.ds-footer{
+    position: absolute;
+    right: 12px;
+    bottom: 10px;
+    display: flex;
+    gap: 8px;
+    align-items: center;
+    justify-content: flex-end;
+    z-index: 3;
+}
+
+/* Ensure textarea content sits below overlays */
+.ds-textarea{
+    position: relative;
+    z-index: 1;
+}
+
+/* ===== FIX dropdown menu being clipped/behind ===== */
+
+/* Let the dropdown menu escape the prompt wrapper */
+.ds-top{
+    overflow: visible !important; /* IMPORTANT: do not clip the menu */
+}
+
+/* Keep the rounded "safe area" look without clipping the menu */
+.ds-top::after{
+    left: 0 !important;
+    right: 0 !important;
+    bottom: 0 !important;
+    border-bottom-left-radius: 14px !important;
+    border-bottom-right-radius: 14px !important;
+}
+
+/* Ensure the footer stays above the safe-area bar */
+.ds-footer{
+    z-index: 20 !important;
+}
+
+/* Make sure the opened menu is above EVERYTHING */
+.ds-footer .cd-menu{
+    z-index: 999999 !important;
+}
+
+/* Sometimes Gradio/columns/cards create stacking contexts;
+   force the whole prompt card above nearby panels */
+.ds-card{
+    position: relative;
+    z-index: 50;
+}
+
+/* --- Fix focus highlight shape (make it match rounded container) --- */
+
+/* Kill any theme focus ring on the textarea itself */
+.ds-textarea:focus,
+.ds-textarea:focus-visible{
+    outline: none !important;
+    box-shadow: none !important;
+}
+
+/* Optional: if some themes apply it even when not focused */
+.ds-textarea{
+    outline: none !important;
+}
+
+/* Apply the focus ring to the rounded wrapper instead */
+.ds-top:focus-within{
+    border-color: rgba(255,255,255,0.22) !important;
+    box-shadow: 0 0 0 3px rgba(255,255,255,0.06) !important;
+    border-radius: 14px !important;
+}
+
+/* If you see any tiny square corners, ensure the wrapper clips its own shadow properly */
+.ds-top{
+    border-radius: 14px !important;
+}
+
+/* =========================
+   CameraDropdown: force readable menu text in BOTH themes
+   ========================= */
+
+/* Menu surface */
+.cd-menu{
+    background: #2b2b2b !important;
+    border: 1px solid rgba(255,255,255,0.14) !important;
+}
+
+/* Title */
+.cd-title{
+    color: rgba(255,255,255,0.55) !important;
+}
+
+/* Items + all descendants (fixes spans / inherited theme colors) */
+.cd-item,
+.cd-item *{
+    color: rgba(255,255,255,0.92) !important;
+}
+
+/* Hover state */
+.cd-item:hover{
+    background: rgba(255,255,255,0.10) !important;
+}
+
+/* Checkmark */
+.cd-item::after{
+    color: rgba(255,255,255,0.92) !important;
+}
+
+/* (Optional) make sure the trigger stays readable too */
+.cd-trigger,
+.cd-trigger *{
+    color: rgba(255,255,255,0.75) !important;
+}
+
+/* ---- preset gallery ---- */
+.pg-wrap{
+    width: 100%;
+    max-width: 1100px;
+    margin: 18px auto 0 auto;
+}
+.pg-title{
+    text-align: center;
+    margin-bottom: 14px;
+}
+.pg-h1{
+    font-size: 34px;
+    font-weight: 800;
+    line-height: 1.1;
+    /* theme-aware */
+    color: var(--body-text-color);
+}
+.pg-h2{
+    font-size: 14px;
+    font-weight: 600;
+    color: var(--body-text-color-subdued);
+    margin-top: 6px;
+}
+
+.pg-grid{
+    display: grid;
+    grid-template-columns: repeat(3, minmax(0, 1fr)); /* 3 per row */
+    gap: 18px;
+}
+
+.pg-card{
+    border: none;
+    background: transparent;
+    padding: 0;
+    cursor: pointer;
+    border-radius: 12px;
+    overflow: hidden;
+    position: relative;
+    transform: translateZ(0);
+}
+
+.pg-img{
+    width: 100%;
+    height: 220px; /* adjust to match your look */
+    object-fit: cover;
+    display: block;
+    border-radius: 12px;
+    transition: transform 160ms ease, filter 160ms ease, opacity 160ms ease;
+}
+
+/* hover: slight zoom on hovered card */
+.pg-card:hover .pg-img{
+    transform: scale(1.02);
+}
+
+/* dim others while hovering */
+.pg-card[data-dim="true"] .pg-img{
+    opacity: 0.35;
+    filter: saturate(0.9);
+}
+
+/* keep hovered/active crisp */
+.pg-card[data-active="true"] .pg-img{
+    opacity: 1.0;
+    filter: none;
+}
+"""
+
+css += """
+/* ---- AudioDropUpload ---- */
+.aud-wrap{
+    width: 100%;
+    max-width: 720px;
+}
+.aud-drop{
+    border: 2px dashed var(--body-text-color-subdued);
+    border-radius: 16px;
+    padding: 18px;
+    text-align: center;
+    cursor: pointer;
+    user-select: none;
+    color: var(--body-text-color);
+    background: var(--block-background-fill);
+}
+.aud-drop.dragover{
+    border-color: rgba(255,255,255,0.35);
+    background: rgba(255,255,255,0.06);
+}
+.aud-hint{
+    color: var(--body-text-color-subdued);
+    font-size: 0.95rem;
+    margin-top: 6px;
+}
+/* pill row like your other controls */
+.aud-row{
+    display: none;
+    align-items: center;
+    gap: 10px;
+    background: #0b0b0b;
+    border-radius: 9999px;
+    padding: 8px 10px;
+}
+.aud-player{
+    flex: 1;
+    width: 100%;
+    height: 34px;
+    border-radius: 9999px;
+}
+.aud-remove{
+    appearance: none;
+    border: none;
+    background: transparent;
+    color: rgba(255,255,255);
+    cursor: pointer;
+    width: 36px;
+    height: 36px;
+    border-radius: 9999px;
+    display: inline-flex;
+    align-items: center;
+    justify-content: center;
+    padding: 0;
+    transition: background 120ms ease, color 120ms ease, opacity 120ms ease;
+    opacity: 0.9;
+    flex: 0 0 auto;
+}
+.aud-remove:hover{
+    background: rgba(255,255,255,0.08);
+    color: rgb(255,255,255);
+    opacity: 1;
+}
+.aud-filelabel{
+    margin: 10px 6px 0;
+    color: var(--body-text-color-subdued);
+    font-size: 0.95rem;
+    display: none;
+}
+#audio_input_hidden { display: none !important; }
+"""
+
+
+def apply_example(idx: str):
+    idx = int(idx)
+
+    # Read the example row from your list
+    img, prompt_txt, cam, res, mode, vid, aud, end_img = examples_list[idx]
+
+    img_path = img if img else None
+    vid_path = vid if vid else None
+    aud_path = aud if aud else None
+
+    input_image_update = img_path
+    prompt_update = prompt_txt
+    camera_update = cam
+    resolution_update = res
+    mode_update = mode
+    video_update = gr.update(value=vid_path, visible=(mode == "Motion Control"))
+    audio_update = aud_path
+    end_image = end_img
+
+    return (
+        input_image_update,
+        prompt_update,
+        camera_update,
+        resolution_update,
+        mode_update,
+        video_update,
+        audio_update,
+        audio_update,
+        end_image,
+    )
+
+
+####################################################################################################
+### PART 20: Gradio UI Layout & Launch
+####################################################################################################
+with gr.Blocks(title="LTX-2 Video Distilled 🎥🔈", css=css) as demo:
+
+    gr.HTML(
+        """
+        LTX-2 Distilled DiT-based audio-video foundation model
+        [model]
+        HF Space by:
-    """)
+        """
+    )

     with gr.Column(elem_id="col-container"):
         with gr.Row(elem_id="mode-row"):
             radioanimated_mode = RadioAnimated(
-                choices=["Image-to-Video", "Interpolate", "Motion Control"],
-                value="Image-to-Video",
+                choices=["Image-to-Video", "Interpolate"],
+                value="Image-to-Video",
                 elem_id="radioanimated_mode"
             )

-        with gr.Row():
-            # Inputs column
         with gr.Column(elem_id="step-column"):
+            with gr.Row():
-                first_frame = gr.Image(label="First frame (start image)", type="filepath", height=256)
-                end_frame = gr.Image(label="Last frame (end)", type="filepath", height=256, visible=False)
-                input_video = gr.Video(label="Motion reference video", height=256, visible=False)
+
+                first_frame = gr.Image(
+                    label="First Frame (Optional)",
+                    type="filepath",
+                    height=256
+                )
+
+                end_frame = gr.Image(
+                    label="Last Frame (Optional)",
+                    type="filepath",
+                    height=256,
+                    visible=False,
+                )
+
+                # Motion Control was removed; keep a hidden placeholder so the
+                # event wiring below can still target input_video.
+                input_video = gr.Video(label="Motion Reference", height=256, visible=False)

-            # Move the dropdown controls into the prompt box via JS
", js_on_load=r""" + relocate = gr.HTML( + value="", + html_template="
", + js_on_load=r""" (() => { function moveIntoFooter() { const promptRoot = document.querySelector("#prompt_ui"); if (!promptRoot) return false; + const footer = promptRoot.querySelector(".ds-footer"); if (!footer) return false; + const dur = document.querySelector("#duration_ui .cd-wrap"); const res = document.querySelector("#resolution_ui .cd-wrap"); const cam = document.querySelector("#camera_ui .cd-wrap"); + if (!dur || !res || !cam) return false; - footer.appendChild(dur); footer.appendChild(res); footer.appendChild(cam); + + footer.appendChild(dur); + footer.appendChild(res); + footer.appendChild(cam); + return true; } - const tick = () => { if (!moveIntoFooter()) requestAnimationFrame(tick); }; + + const tick = () => { + if (!moveIntoFooter()) requestAnimationFrame(tick); + }; requestAnimationFrame(tick); })(); - """) - - prompt_ui = PromptBox(value="این تصویر را زنده کن با حرکات سینمایی و نرم...", elem_id="prompt_ui") + """ + ) - # ورودی مخفی صدا - audio_input = gr.File(label="صدا", file_types=["audio"], type="filepath", elem_id="audio_input_hidden", visible=False) - audio_ui = AudioDropUpload(target_audio_elem_id="audio_input_hidden", elem_id="audio_ui") - # متغیرهای مخفی برای نگهداری مقادیر - prompt = gr.Textbox(label="متن پرامپت", value="", visible=False) - enhance_prompt = gr.Checkbox(label="بهبود خودکار پرامپت", value=True, visible=False) + prompt_ui = PromptBox( + value="Make this image come alive with cinematic motion, smooth animation", + elem_id="prompt_ui", + ) + + # Hidden real audio input (backend value) + audio_input = gr.File( + label="Audio (Optional)", + file_types=["audio"], + type="filepath", + elem_id="audio_input_hidden", + ) + + # Custom UI that feeds the hidden gr.Audio above + audio_ui = AudioDropUpload( + target_audio_elem_id="audio_input_hidden", + elem_id="audio_ui", + ) + + prompt = gr.Textbox( + label="Prompt", + value="Make this image come alive with cinematic motion, smooth animation", + lines=3, + max_lines=3, + placeholder="Describe the motion and animation you want...", + visible=False + ) + + enhance_prompt = gr.Checkbox( + label="Enhance Prompt", + value=True, + visible=False + ) + + with gr.Accordion("Advanced Settings", open=False, visible=False): + seed = gr.Slider( + label="Seed", + minimum=0, + maximum=MAX_SEED, + value=DEFAULT_SEED, + step=1 + ) - with gr.Accordion("تنظیمات پیشرفته", open=False, visible=True): - seed = gr.Slider(label="Seed (دانه تصادفی)", minimum=0, maximum=MAX_SEED, value=DEFAULT_SEED, step=1) - randomize_seed = gr.Checkbox(label="Seed تصادفی", value=True) + randomize_seed = gr.Checkbox(label="Randomize Seed", value=True) - # ستون خروجی + with gr.Column(elem_id="step-column"): - output_video = gr.Video(label="ویدیوی تولید شده", autoplay=True, height=512) - - with gr.Row(elem_id="controls-row"): - # کنترل‌های مخفی (نمایش در فوتر پرامپت) - duration_ui = CameraDropdown( - choices=["3s", "5s", "10s", "15s"], value="5s", title="مدت زمان", elem_id="duration_ui" - ) - duration = gr.Number(value=5.0, visible=False) + output_video = gr.Video(label="Generated Video", autoplay=True, height=512) - ICON_16_9 = """""" - ICON_1_1 = """""" - ICON_9_16 = """""" + with gr.Row(elem_id="controls-row"): + + duration_ui = CameraDropdown( + choices=["3s", "5s", "10s", "15s"], + value="5s", + title="Clip Duration", + elem_id="duration_ui" + ) - resolution_ui = CameraDropdown( - choices=[ - {"label": "16:9", "value": "16:9", "icon": ICON_16_9}, - {"label": "1:1", "value": "1:1", "icon": ICON_1_1}, - {"label": "9:16", "value": "9:16", "icon": 
-                    ],
-                    value="16:9", title="Size", elem_id="resolution_ui"
-                )
-                width = gr.Number(value=DEFAULT_1_STAGE_WIDTH, visible=False)
-                height = gr.Number(value=DEFAULT_1_STAGE_HEIGHT, visible=False)
+                duration = gr.Slider(
+                    label="Duration (seconds)",
+                    minimum=1.0,
+                    maximum=15.0,
+                    value=5.0,
+                    step=0.1,
+                    visible=False
+                )
+
+                ICON_16_9 = """"""
+
+                ICON_1_1 = """"""
+
+                ICON_9_16 = """"""
+
+                resolution_ui = CameraDropdown(
+                    choices=[
+                        {"label": "16:9", "value": "16:9", "icon": ICON_16_9},
+                        {"label": "1:1", "value": "1:1", "icon": ICON_1_1},
+                        {"label": "9:16", "value": "9:16", "icon": ICON_9_16},
+                    ],
+                    value="16:9",
+                    title="Resolution",
+                    elem_id="resolution_ui"
+                )
+
+                width = gr.Number(label="Width", value=DEFAULT_1_STAGE_WIDTH, precision=0, visible=False)
+                height = gr.Number(label="Height", value=DEFAULT_1_STAGE_HEIGHT, precision=0, visible=False)
+
+                camera_ui = CameraDropdown(
+                    choices=[name for name, _ in VISIBLE_RUNTIME_LORA_CHOICES],
+                    value="No LoRA",
+                    title="Camera LoRA",
+                    elem_id="camera_ui",
+                )
+
+                # Hidden real dropdown (backend value)
+                camera_lora = gr.Dropdown(
+                    label="Camera Control LoRA",
+                    choices=[name for name, _ in VISIBLE_RUNTIME_LORA_CHOICES],
+                    value="No LoRA",
+                    visible=False
+                )
+
+                generate_btn = gr.Button("🤩 Generate Video", variant="primary", elem_classes="button-gradient")
-                camera_ui = CameraDropdown(
-                    choices=[name for name, _ in RUNTIME_LORA_CHOICES],
-                    value="None", title="Camera movement", elem_id="camera_ui"
-                )
-                camera_lora = gr.Textbox(value="None", visible=False)
-                generate_btn = gr.Button("✨ Generate Video", variant="primary", elem_classes="button-gradient")

+        camera_ui.change(
+            fn=lambda x: x,
+            inputs=camera_ui,
+            outputs=camera_lora,
+            api_visibility="private"
+        )
-        # Wire up events
-        camera_ui.change(lambda x: x, camera_ui, camera_lora)
-        radioanimated_mode.change(on_mode_change, radioanimated_mode, [input_video, end_frame])
-        duration_ui.change(apply_duration, duration_ui, duration)
-        resolution_ui.change(apply_resolution, resolution_ui, [width, height])
-        prompt_ui.change(lambda x: x, prompt_ui, prompt)
-
-        # Handle audio upload
-        def on_audio_ui_change(v):
-            return None if (v == "__CLEAR__" or not v) else gr.update()
-        audio_ui.change(on_audio_ui_change, audio_ui, audio_input)
+        radioanimated_mode.change(
+            fn=on_mode_change,
+            inputs=radioanimated_mode,
+            outputs=[input_video, end_frame],
+            api_visibility="private",
+        )
+
+        duration_ui.change(
+            fn=apply_duration,
+            inputs=duration_ui,
+            outputs=[duration],
+            api_visibility="private"
+        )
+        resolution_ui.change(
+            fn=apply_resolution,
+            inputs=resolution_ui,
+            outputs=[width, height],
+            api_visibility="private"
+        )
+        prompt_ui.change(
+            fn=lambda x: x,
+            inputs=prompt_ui,
+            outputs=prompt,
+            api_visibility="private"
+        )
+
         generate_btn.click(
             fn=generate_video,
-            inputs=[first_frame, end_frame, prompt, duration, input_video, radioanimated_mode, enhance_prompt, seed, randomize_seed, height, width, camera_lora, audio_input],
+            inputs=[
+                first_frame,
+                end_frame,
+                prompt,
+                duration,
+                gr.State(None),  # Placeholder for input_video, which Motion Control removal made unused
+                radioanimated_mode,
+                enhance_prompt,
+                seed,
+                randomize_seed,
+                height,
+                width,
+                camera_lora,
+                audio_input
+            ],
             outputs=[output_video]
         )
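+        # Pattern note: each custom control above (PromptBox, CameraDropdown) mirrors
+        # its value into a hidden built-in component via .change, so the click handler
+        # only ever reads standard Gradio values, e.g.:
+        #
+        #   camera_ui.change(fn=lambda x: x, inputs=camera_ui, outputs=camera_lora)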
"زوم به داخل", "16:9", "درون‌یابی", None, None, "examples/frame4.png"], - ["examples/clay.png", "شخصیت خمیری در حال رقص تیک‌تاک...", "هیچکدام", "9:16", "کنترل حرکت", "examples/tiktok.mp4", None, None], + [ + "examples/supergirl-2.png", + "A fuzzy puppet superhero character resembling a female puppet with blonde hair and a blue superhero suit sleeping in bed and just waking up, she gradually gets up, rubbing her eyes and looking at her dog that just popped on the bed. the scene feels chaotic, comedic, and emotional with expressive puppet reactions, cinematic lighting, smooth camera motion, shallow depth of field, and high-quality puppet-style animation", + "Static", + "16:9", + "Image-to-Video", + None, + "examples/supergirl.m4a", + None, + ], + [ + "examples/frame3.png", + "a woman in a white dress standing in a supermarket, looking at a stack of pomegranates, she picks one and takes a bite, the camera zooms in to a close up of the pomegranate seeds. A calm music is playing in the supermarket and you can hear her taking a bite.", + "Zoom In", + "16:9", + "Interpolate", + None, + None, + "examples/frame4.png", + ], + [ + "examples/supergirl.png", + "A fuzzy puppet superhero character resembling a female puppet with blonde hair and a blue superhero suit stands inside an icy cave made of frozen walls and icicles, she looks panicked and frantic, rapidly turning her head left and right and scanning the cave while waving her arms and shouting angrily and desperately, mouthing the words “where the hell is my dog,” her movements exaggerated and puppet-like with high energy and urgency, suddenly a second puppet dog bursts into frame from the side, jumping up excitedly and tackling her affectionately while licking her face repeatedly, she freezes in surprise and then breaks into relief and laughter as the dog continues licking her, the scene feels chaotic, comedic, and emotional with expressive puppet reactions, cinematic lighting, smooth camera motion, shallow depth of field, and high-quality puppet-style animation", + "No LoRA", + "16:9", + "Image-to-Video", + None, + None, + None, + ], + [ + "examples/highland.png", + "Realistic POV selfie-style video in a snowy, foggy field. Two shaggy Highland cows with long curved horns stand ahead. The camera is handheld and slightly shaky. The woman filming talks nervously and excitedly in a vlog tone: \"Oh my god guys… look how big those horns are… I’m kinda scared.\" The cow on the left walks toward the camera in a cute, bouncy, hopping way, curious and gentle. Snow crunches under its hooves, breath visible in the cold air. The horns look massive from the POV. As the cow gets very close, its wet nose with slight dripping fills part of the frame. She laughs nervously but reaches out and pets the cow. The cow makes deep, soft, interesting mooing and snorting sounds, calm and friendly. 
+            [
+                "examples/wednesday.png",
+                "A cinematic dolly out of Wednesday Addams frozen mid-dance on a dark, blue-lit ballroom floor as students move indistinctly behind her, their footsteps and muffled music reduced to a distant, underwater thrum; the audio foregrounds her steady breathing and the faint rustle of fabric as she slowly raises one arm, never breaking eye contact with the camera, then after a deliberately long silence she speaks in a flat, dry, perfectly controlled voice, “I don’t dance… I vibe code,” each word crisp and unemotional, followed by an abrupt cutoff of her voice as the background sound swells slightly, reinforcing the deadpan humor, with precise lip sync, minimal facial movement, stark gothic lighting, and cinematic realism.",
+                "Zoom Out",
+                "16:9",
+                "Image-to-Video",
+                None,
+                None,
+                None,
+            ],
+            [
+                "examples/astronaut.png",
+                "An astronaut hatches from a fragile egg on the surface of the Moon, the shell cracking and peeling apart in gentle low-gravity motion. Fine lunar dust lifts and drifts outward with each movement, floating in slow arcs before settling back onto the ground. The astronaut pushes free in a deliberate, weightless motion, small fragments of the egg tumbling and spinning through the air. In the background, the deep darkness of space subtly shifts as stars glide with the camera's movement, emphasizing vast depth and scale. The camera performs a smooth, cinematic slow push-in, with natural parallax between the foreground dust, the astronaut, and the distant starfield. Ultra-realistic detail, physically accurate low-gravity motion, cinematic lighting, and a breath-taking, movie-like shot.",
+                "Static",
+                "1:1",
+                "Image-to-Video",
+                None,
+                None,
+                None,
+            ],
         ]
-
+
+        examples_obj = create_examples(
+            examples=examples_list,
+            fn=generate_video_example,
+            inputs=[first_frame, prompt_ui, camera_ui, resolution_ui, radioanimated_mode, audio_input, end_frame],
+            outputs=[output_video],
+            label="Examples",
+            cache_examples=True,
+            visible=False
+        )
+
         preset_gallery = PresetGallery(
             items=[
-                {"thumb": "examples/supergirl-2.png", "label": "Puppet + audio"},
-                {"thumb": "examples/frame3.png", "label": "First & last frame"},
-                {"thumb": "examples/clay.png", "label": "Motion pose"},
+                {"thumb": "examples/supergirl-2.png", "label": "Example 1", "title": "Image + Audio to Video"},
+                {"thumb": "examples/frame3.png",      "label": "Example 2", "title": "First and Last Frame"},
+                {"thumb": "examples/supergirl.png",   "label": "Example 3", "title": "Image to Video"},
+                {"thumb": "examples/highland.png",    "label": "Example 4", "title": "Image to Video"},
+                {"thumb": "examples/wednesday.png",   "label": "Example 5", "title": "Image to Video"},
+                {"thumb": "examples/astronaut.png",   "label": "Example 6", "title": "Image to Video"},
             ],
-            title="Ready-made examples (click)"
+            title="Click on Our Examples",
         )
+
+        def on_audio_ui_change(v):
+            # Our JS sends "__CLEAR__" when the user presses the X
+            if v == "__CLEAR__" or v is None or v == "":
+                return None
+            # For normal events (uploads), do nothing (keep whatever gr.File already has)
+            return gr.update()
-        # Run examples
-        def apply_example(idx):
+        audio_ui.change(
+            fn=on_audio_ui_change,
+            inputs=audio_ui,
+            outputs=audio_input,
+            api_visibility="private",
+        )
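+        # Note: with cache_examples=True, create_examples pre-renders every example
+        # once and stores the outputs; Examples.load_from_cache(idx) then returns the
+        # cached components, so clicking a preset below replays the stored video
+        # instead of re-running generate_video on the GPU.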
+
+        def run_cached_example_by_index(idx):
             idx = int(idx)
-            ex = examples_list[idx]
-            return ex[0], ex[1], ex[2], ex[3], ex[4], (gr.update(value=ex[5], visible=(ex[4]=="Motion Control"))), ex[6], ex[6], ex[7]
+            cached_outputs = examples_obj.load_from_cache(idx)
+            return cached_outputs[0] if len(cached_outputs) == 1 else cached_outputs
+
         preset_gallery.change(
-            apply_example, preset_gallery,
-            [first_frame, prompt_ui, camera_ui, resolution_ui, radioanimated_mode, input_video, audio_input, audio_ui, end_frame]
+            fn=apply_example,
+            inputs=preset_gallery,
+            outputs=[
+                first_frame,
+                prompt_ui,
+                camera_ui,
+                resolution_ui,
+                radioanimated_mode,
+                input_video,  # hidden placeholder update
+                audio_input,
+                audio_ui,
+                end_frame,
+            ],
+            api_visibility="private",
+        ).then(
+            fn=run_cached_example_by_index,
+            inputs=preset_gallery,
+            outputs=[output_video],
+            postprocess=False,
+            api_visibility="private",
         )
-
-# --- PART 20: MAIN EXECUTION & LAUNCH ---
 if __name__ == "__main__":
-    demo.launch(ssr_mode=False, allowed_paths=["./examples"])
\ No newline at end of file
+    demo.launch(ssr_mode=False, mcp_server=True, allowed_paths=["./examples"])
\ No newline at end of file