some improvements
#2 by linoyts (HF Staff) · opened

This PR offloads Gemma text encoding to a separate Space (linoyts/gemma-text-encoder) reached through gradio_client, so the app builds the LTX-2 pipeline without a local text encoder (gemma_root=None) and fetches pre-computed prompt embeddings instead. TI2VidTwoStagesPipeline gains optional video/audio context arguments, its second-stage denoising switches from the plain Euler loop to a gradient-estimating Euler loop (ge_gamma=2.0), the UI adds an "Enhance Prompt" toggle, and a wednesday.png example is added.
.gitattributes CHANGED
@@ -36,3 +36,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  astronaut.jpg filter=lfs diff=lfs merge=lfs -text
  kill_bill.jpeg filter=lfs diff=lfs merge=lfs -text
  cat_selfie.JPG filter=lfs diff=lfs merge=lfs -text
+ wednesday.png filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -9,6 +9,9 @@ import numpy as np
  import random
  import spaces
  import gradio as gr
+ from gradio_client import Client, handle_file
+ import torch
+ from pathlib import Path
  from typing import Optional
  from huggingface_hub import hf_hub_download
  from ltx_pipelines.ti2vid_two_stages import TI2VidTwoStagesPipeline
@@ -33,11 +36,13 @@ DEFAULT_PROMPT = "An astronaut hatches from a fragile egg on the surface of the
  
  # HuggingFace Hub defaults
  DEFAULT_REPO_ID = "Lightricks/LTX-2"
- DEFAULT_GEMMA_REPO_ID = "google/gemma-3-12b-it-qat-q4_0-unquantized"
  DEFAULT_CHECKPOINT_FILENAME = "ltx-2-19b-dev-fp8.safetensors"
  DEFAULT_DISTILLED_LORA_FILENAME = "ltx-2-19b-distilled-lora-384.safetensors"
  DEFAULT_SPATIAL_UPSAMPLER_FILENAME = "ltx-2-spatial-upscaler-x2-1.0.safetensors"
  
+ # Text encoder space URL
+ TEXT_ENCODER_SPACE = "linoyts/gemma-text-encoder"
+
  def get_hub_or_local_checkpoint(repo_id: Optional[str] = None, filename: Optional[str] = None):
      """Download from HuggingFace Hub or use local checkpoint."""
      if repo_id is None and filename is None:
@@ -68,28 +73,40 @@ print(f"Initializing pipeline with:")
  print(f" checkpoint_path={checkpoint_path}")
  print(f" distilled_lora_path={distilled_lora_path}")
  print(f" spatial_upsampler_path={spatial_upsampler_path}")
- print(f" gemma_root={DEFAULT_GEMMA_REPO_ID}")
+ print(f" text_encoder_space={TEXT_ENCODER_SPACE}")
  
+ # Initialize pipeline WITHOUT text encoder (gemma_root=None).
+ # Text encoding will be done by the external space.
  pipeline = TI2VidTwoStagesPipeline(
      checkpoint_path=checkpoint_path,
      distilled_lora_path=distilled_lora_path,
      distilled_lora_strength=DEFAULT_LORA_STRENGTH,
      spatial_upsampler_path=spatial_upsampler_path,
-     gemma_root=DEFAULT_GEMMA_REPO_ID,
+     gemma_root=None,
      loras=[],
      fp8transformer=False,
      local_files_only=False
  )
  
+ # Initialize the text encoder client
+ print(f"Connecting to text encoder space: {TEXT_ENCODER_SPACE}")
+ try:
+     text_encoder_client = Client(TEXT_ENCODER_SPACE)
+     print("✓ Text encoder client connected!")
+ except Exception as e:
+     print(f"⚠ Warning: Could not connect to text encoder space: {e}")
+     text_encoder_client = None
+
  @spaces.GPU(duration=300)
  def generate_video(
      input_image,
      prompt: str,
      duration: float,
+     enhance_prompt: bool = True,
      negative_prompt: str = DEFAULT_NEGATIVE_PROMPT,
      seed: int = 42,
      randomize_seed: bool = True,
-     num_inference_steps: int = DEFAULT_NUM_INFERENCE_STEPS,
+     num_inference_steps: int = 25,
      cfg_guidance_scale: float = DEFAULT_CFG_GUIDANCE_SCALE,
      height: int = DEFAULT_HEIGHT,
      width: int = DEFAULT_WIDTH,
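
Note: the app now depends on a separately hosted encoder Space, so the /encode_prompt contract is worth verifying before deploying. gradio_client can print the endpoints a Space actually exposes; a minimal local check (not part of this PR):

    from gradio_client import Client

    # Lists every named endpoint with its parameter names and return types.
    # The app assumes /encode_prompt accepts: prompt, enhance_prompt,
    # input_image, seed, and negative_prompt.
    client = Client("linoyts/gemma-text-encoder")
    client.view_api()
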
@@ -107,20 +124,64 @@ def generate_video(
      # Create output directory if it doesn't exist
      output_dir = Path("outputs")
      output_dir.mkdir(exist_ok=True)
-     output_path = output_dir / f"video_{seed}.mp4"
+     output_path = output_dir / f"video_{current_seed}.mp4"
  
      # Handle image input
      images = []
+     temp_image_path = None  # Initialize to None
      if input_image is not None:
          # Save uploaded image temporarily
-         temp_image_path = output_dir / f"temp_input_{seed}.jpg"
+         temp_image_path = output_dir / f"temp_input_{current_seed}.jpg"
          if hasattr(input_image, 'save'):
              input_image.save(temp_image_path)
          else:
              # If it's a file path already
-             temp_image_path = input_image
+             temp_image_path = Path(input_image)
          # Format: (image_path, frame_idx, strength)
          images = [(str(temp_image_path), 0, 1.0)]
+     # Get embeddings from the text encoder space
+     print(f"Encoding prompt: {prompt}")
+
+     if text_encoder_client is None:
+         raise RuntimeError(
+             f"Text encoder client not connected. Please ensure the text encoder space "
+             f"({TEXT_ENCODER_SPACE}) is running and accessible."
+         )
+
+     try:
+         # Prepare image for upload if it exists
+         image_input = None
+         if temp_image_path is not None:
+             image_input = handle_file(str(temp_image_path))
+
+         result = text_encoder_client.predict(
+             prompt=prompt,
+             enhance_prompt=enhance_prompt,
+             input_image=image_input,
+             seed=current_seed,
+             negative_prompt=negative_prompt,
+             api_name="/encode_prompt"
+         )
+         embedding_path = result[0]  # Path to .pt file
+         print(f"Embeddings received from: {embedding_path}")
+
+         # Load embeddings
+         embeddings = torch.load(embedding_path)
+         video_context_positive = embeddings['video_context']
+         audio_context_positive = embeddings['audio_context']
+
+         # Load negative contexts if available
+         video_context_negative = embeddings.get('video_context_negative', None)
+         audio_context_negative = embeddings.get('audio_context_negative', None)
+
+         print("✓ Embeddings loaded successfully")
+         if video_context_negative is not None:
+             print("  ✓ Negative prompt embeddings also loaded")
+     except Exception as e:
+         raise RuntimeError(
+             f"Failed to get embeddings from text encoder space: {e}\n"
+             f"Please ensure {TEXT_ENCODER_SPACE} is running properly."
+         )
  
      # Run inference - progress automatically tracks tqdm from pipeline
      pipeline(
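
Note: the encoder Space itself is not part of this diff. As a point of reference, here is a minimal sketch of a server-side /encode_prompt that would satisfy the client code above. The function body, tensor shapes, and second output are assumptions; only the parameter names, the api_name, the .pt payload keys, and the result[0] indexing are taken from the caller:

    import torch
    import gradio as gr

    def encode_prompt(prompt, enhance_prompt, input_image, seed, negative_prompt):
        # The real Space would run Gemma here; random tensors stand in for the
        # video/audio context embeddings (shapes are hypothetical).
        shape = (1, 256, 3840)
        emb = {
            "video_context": torch.randn(shape),
            "audio_context": torch.randn(shape),
            "video_context_negative": torch.randn(shape),  # optional: client uses .get()
            "audio_context_negative": torch.randn(shape),  # optional: client uses .get()
        }
        out_path = f"embeddings_{int(seed)}.pt"
        torch.save(emb, out_path)
        # Two outputs, so the client's result[0] is the .pt path.
        return out_path, prompt

    with gr.Blocks() as demo:
        prompt = gr.Textbox(label="prompt")
        enhance = gr.Checkbox(label="enhance_prompt", value=True)
        image = gr.Image(label="input_image", type="filepath")
        seed = gr.Number(label="seed", value=42)
        negative = gr.Textbox(label="negative_prompt")
        emb_file = gr.File(label="embeddings")
        final_prompt = gr.Textbox(label="final prompt")
        gr.Button("Encode").click(
            encode_prompt,
            inputs=[prompt, enhance, image, seed, negative],
            outputs=[emb_file, final_prompt],
            api_name="encode_prompt",  # exposed to gradio_client as /encode_prompt
        )

    demo.launch()
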
@@ -136,6 +197,10 @@ def generate_video(
          cfg_guidance_scale=cfg_guidance_scale,
          images=images,
          tiling_config=TilingConfig.default(),
+         video_context_positive=video_context_positive,
+         audio_context_positive=audio_context_positive,
+         video_context_negative=video_context_negative,
+         audio_context_negative=audio_context_negative,
      )
  
      return str(output_path), current_seed
@@ -166,13 +231,18 @@ with gr.Blocks(title="LTX-2 Video 🎥🔈") as demo:
              placeholder="Describe the motion and animation you want..."
          )
  
-         duration = gr.Slider(
-             label="Duration (seconds)",
-             minimum=1.0,
-             maximum=10.0,
-             value=3.0,
-             step=0.1
-         )
+         with gr.Row():
+             duration = gr.Slider(
+                 label="Duration (seconds)",
+                 minimum=1.0,
+                 maximum=10.0,
+                 value=3.0,
+                 step=0.1
+             )
+             enhance_prompt = gr.Checkbox(
+                 label="Enhance Prompt",
+                 value=True
+             )
  
          generate_btn = gr.Button("Generate Video", variant="primary")
  
@@ -200,7 +270,7 @@ with gr.Blocks(title="LTX-2 Video 🎥🔈") as demo:
              label="Inference Steps",
              minimum=1,
              maximum=100,
-             value=DEFAULT_NUM_INFERENCE_STEPS,
+             value=25,
              step=1
          )
  
@@ -233,6 +303,7 @@ with gr.Blocks(title="LTX-2 Video 🎥🔈") as demo:
          input_image,
          prompt,
          duration,
+         enhance_prompt,
          negative_prompt,
          seed,
          randomize_seed,
@@ -248,14 +319,19 @@ with gr.Blocks(title="LTX-2 Video 🎥🔈") as demo:
      gr.Examples(
          examples=[
              [
-                 "astronaut.jpg",
-                 "An astronaut hatches from a fragile egg on the surface of the Moon, the shell cracking and peeling apart in gentle low-gravity motion. Fine lunar dust lifts and drifts outward with each movement, floating in slow arcs before settling back onto the ground. The astronaut pushes free in a deliberate, weightless motion, small fragments of the egg tumbling and spinning through the air. In the background, the deep darkness of space subtly shifts as stars glide with the camera's movement, emphasizing vast depth and scale. The camera performs a smooth, cinematic slow push-in, with natural parallax between the foreground dust, the astronaut, and the distant starfield. Ultra-realistic detail, physically accurate low-gravity motion, cinematic lighting, and a breath-taking, movie-like shot.",
+                 "kill_bill.jpeg",
+                 "A low, subsonic drone pulses as Uma Thurman's character, Beatrix Kiddo, holds her razor-sharp katana blade steady in the cinematic lighting. A faint electrical hum fills the silence. Suddenly, accompanied by a deep metallic groan, the polished steel begins to soften and distort, like heated metal starting to lose its structural integrity. Discordant strings swell as the blade's perfect edge slowly warps and droops, molten steel beginning to flow downward in silvery rivulets while maintaining its metallic sheen—each drip producing a wet, viscous stretching sound. The transformation starts subtly at first—a slight bend in the blade—then accelerates as the metal becomes increasingly fluid, the groaning intensifying. The camera holds steady on her face as her piercing eyes gradually narrow, not with lethal focus, but with confusion and growing alarm as she watches her weapon dissolve before her eyes. She whispers under her breath, voice flat with disbelief: 'Wait, what?' Her heartbeat rises in the mix—thump... thump-thump—as her breathing quickens slightly while she witnesses this impossible transformation. Sharp violin stabs punctuate each breath. The melting intensifies, the katana's perfect form becoming increasingly abstract, dripping like liquid mercury from her grip. Molten droplets fall to the ground with soft, bell-like pings. Unintelligible whispers fade in and out as her expression shifts from calm readiness to bewilderment and concern, her heartbeat now pounding like a war drum, as her legendary instrument of vengeance literally liquefies in her hands, leaving her defenseless and disoriented. All sound cuts to silence—then a single devastating bass drop as the final droplet falls, leaving only her unsteady breathing in the dark.",
                  5.0,
              ],
              [
-                 "kill_bill.jpeg",
-                 "A low, subsonic drone pulses as Uma Thurman's character, Beatrix Kiddo, holds her razor-sharp katana blade steady in the cinematic lighting. A faint electrical hum fills the silence. Suddenly, accompanied by a deep metallic groan, the polished steel begins to soften and distort, like heated metal starting to lose its structural integrity. Discordant strings swell as the blade's perfect edge slowly warps and droops, molten steel beginning to flow downward in silvery rivulets while maintaining its metallic sheen—each drip producing a wet, viscous stretching sound. The transformation starts subtly at first—a slight bend in the blade—then accelerates as the metal becomes increasingly fluid, the groaning intensifying. The camera holds steady on her face as her piercing eyes gradually narrow, not with lethal focus, but with confusion and growing alarm as she watches her weapon dissolve before her eyes. She whispers under her breath, voice flat with disbelief: 'Wait, what?' Her heartbeat rises in the mix—thump... thump-thump—as her breathing quickens slightly while she witnesses this impossible transformation. Sharp violin stabs punctuate each breath. The melting intensifies, the katana's perfect form becoming increasingly abstract, dripping like liquid mercury from her grip. Molten droplets fall to the ground with soft, bell-like pings. Unintelligible whispers fade in and out as her expression shifts from calm readiness to bewilderment and concern, her heartbeat now pounding like a war drum, as her legendary instrument of vengeance literally liquefies in her hands, leaving her defenseless and disoriented. All sound cuts to silence—then a single devastating bass drop as the final droplet falls, leaving only her unsteady breathing in the dark.",
+                 "wednesday.png",
+                 "A cinematic close-up of Wednesday Addams frozen mid-dance on a dark, blue-lit ballroom floor as students move indistinctly behind her, their footsteps and muffled music reduced to a distant, underwater thrum; the audio foregrounds her steady breathing and the faint rustle of fabric as she slowly raises one arm, never breaking eye contact with the camera, then after a deliberately long silence she speaks in a flat, dry, perfectly controlled voice, “I don’t dance… I vibe code,” each word crisp and unemotional, followed by an abrupt cutoff of her voice as the background sound swells slightly, reinforcing the deadpan humor, with precise lip sync, minimal facial movement, stark gothic lighting, and cinematic realism.",
                  5.0,
+             ],
+             [
+                 "astronaut.jpg",
+                 "An astronaut hatches from a fragile egg on the surface of the Moon, the shell cracking and peeling apart in gentle low-gravity motion. Fine lunar dust lifts and drifts outward with each movement, floating in slow arcs before settling back onto the ground. The astronaut pushes free in a deliberate, weightless motion, small fragments of the egg tumbling and spinning through the air. In the background, the deep darkness of space subtly shifts as stars glide with the camera's movement, emphasizing vast depth and scale. The camera performs a smooth, cinematic slow push-in, with natural parallax between the foreground dust, the astronaut, and the distant starfield. Ultra-realistic detail, physically accurate low-gravity motion, cinematic lighting, and a breath-taking, movie-like shot.",
+                 3.0,
              ]
          ],
          fn=generate_video,
 
packages/ltx-pipelines/src/ltx_pipelines/ti2vid_two_stages.py CHANGED
@@ -22,6 +22,7 @@ from ltx_pipelines.pipeline_utils import (
      denoise_audio_video,
      encode_text,
      euler_denoising_loop,
+     gradient_estimating_euler_denoising_loop,
      guider_denoising_func,
      simple_denoising_func,
  )
@@ -90,6 +91,10 @@ class TI2VidTwoStagesPipeline:
          cfg_guidance_scale: float,
          images: list[tuple[str, int, float]],
          tiling_config: TilingConfig | None = None,
+         video_context_positive: torch.Tensor | None = None,
+         audio_context_positive: torch.Tensor | None = None,
+         video_context_negative: torch.Tensor | None = None,
+         audio_context_negative: torch.Tensor | None = None,
      ) -> None:
          generator = torch.Generator(device=self.device).manual_seed(seed)
          noiser = GaussianNoiser(generator=generator)
@@ -97,14 +102,23 @@ class TI2VidTwoStagesPipeline:
          cfg_guider = CFGGuider(cfg_guidance_scale)
          dtype = torch.bfloat16
  
-         text_encoder = self.stage_1_model_ledger.text_encoder()
-         context_p, context_n = encode_text(text_encoder, prompts=[prompt, negative_prompt])
-         v_context_p, a_context_p = context_p
-         v_context_n, a_context_n = context_n
+         # Use pre-computed embeddings if provided, otherwise encode text
+         if (video_context_positive is None or audio_context_positive is None or
+                 video_context_negative is None or audio_context_negative is None):
+             text_encoder = self.stage_1_model_ledger.text_encoder()
+             context_p, context_n = encode_text(text_encoder, prompts=[prompt, negative_prompt])
+             v_context_p, a_context_p = context_p
+             v_context_n, a_context_n = context_n
  
-         torch.cuda.synchronize()
-         del text_encoder
-         utils.cleanup_memory()
+             torch.cuda.synchronize()
+             del text_encoder
+             utils.cleanup_memory()
+         else:
+             # Move pre-computed embeddings to device if needed
+             v_context_p = video_context_positive.to(self.device)
+             a_context_p = audio_context_positive.to(self.device)
+             v_context_n = video_context_negative.to(self.device)
+             a_context_n = audio_context_negative.to(self.device)
  
          # Stage 1: Initial low resolution video generation.
          video_encoder = self.stage_1_model_ledger.video_encoder()
@@ -170,7 +184,18 @@ class TI2VidTwoStagesPipeline:
          def second_stage_denoising_loop(
              sigmas: torch.Tensor, video_state: LatentState, audio_state: LatentState, stepper: DiffusionStepProtocol
          ) -> tuple[LatentState, LatentState]:
-             return euler_denoising_loop(
+             # return euler_denoising_loop(
+             #     sigmas=sigmas,
+             #     video_state=video_state,
+             #     audio_state=audio_state,
+             #     stepper=stepper,
+             #     denoise_fn=simple_denoising_func(
+             #         video_context=v_context_p,
+             #         audio_context=a_context_p,
+             #         transformer=transformer,  # noqa: F821
+             #     ),
+             # )
+             return gradient_estimating_euler_denoising_loop(
                  sigmas=sigmas,
                  video_state=video_state,
                  audio_state=audio_state,
@@ -180,6 +205,7 @@ class TI2VidTwoStagesPipeline:
                      audio_context=a_context_p,
                      transformer=transformer,  # noqa: F821
                  ),
+                 ge_gamma=2.0,  # Gradient estimation coefficient
              )
  
          stage_2_output_shape = VideoPixelShape(
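
Note: gradient_estimating_euler_denoising_loop lives in ltx_pipelines.pipeline_utils, which is not shown in this diff. As a point of reference, the usual gradient-estimation variant of Euler sampling extrapolates the current denoising direction with the previous step's direction; a minimal single-tensor sketch of that update rule (names and signature hypothetical; the real loop operates on the video/audio LatentStates and the denoise_fn shown above):

    import torch

    def gradient_estimating_euler(x, sigmas, denoise_fn, ge_gamma=2.0):
        # First step is plain Euler; afterwards each step follows the
        # extrapolated direction d_bar = ge_gamma * d + (1 - ge_gamma) * d_prev.
        # With ge_gamma = 2.0 that is d_bar = 2*d - d_prev.
        d_prev = None
        for i in range(len(sigmas) - 1):
            denoised = denoise_fn(x, sigmas[i])
            d = (x - denoised) / sigmas[i]  # current ODE direction
            d_bar = d if d_prev is None else ge_gamma * d + (1 - ge_gamma) * d_prev
            x = x + d_bar * (sigmas[i + 1] - sigmas[i])
            d_prev = d
        return x
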
 
wednesday.png ADDED

Git LFS Details

  • SHA256: 27193cae97c24a31ab574a21fc3c598627c28ab60edb0c60209acdb8071cf1ea
  • Pointer size: 132 Bytes
  • Size of remote file: 1.36 MB