linoyts (HF Staff) committed
Commit 2e243c5 · verified · 1 Parent(s): 737cc06

Update app.py
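
Switch the Space from the plain distilled pipeline to ICLoraPipeline: add selectable IC-LoRA adapters (Union Control depth + canny, Motion Track Control), a conditioning-video input with adjustable strength, optional first/last-frame pinning, a skip-stage-2 toggle, and two-stage model preloading for ZeroGPU.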

Files changed (1)
  1. app.py +196 -85
app.py CHANGED
@@ -42,9 +42,11 @@ import gradio as gr
 import numpy as np
 from huggingface_hub import hf_hub_download, snapshot_download
 
+from ltx_core.loader import LoraPathStrengthAndSDOps
+from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP
 from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number
 from ltx_core.quantization import QuantizationPolicy
-from ltx_pipelines.distilled import DistilledPipeline
+from ltx_pipelines.ic_lora import ICLoraPipeline
 from ltx_pipelines.utils.args import ImageConditioningInput
 from ltx_pipelines.utils.media_io import encode_video
 
@@ -61,12 +63,6 @@ except Exception as e:
 logging.getLogger().setLevel(logging.INFO)
 
 MAX_SEED = np.iinfo(np.int32).max
-DEFAULT_PROMPT = (
-    "An astronaut hatches from a fragile egg on the surface of the Moon, "
-    "the shell cracking and peeling apart in gentle low-gravity motion. "
-    "Fine lunar dust lifts and drifts outward with each movement, floating "
-    "in slow arcs before settling back onto the ground."
-)
 DEFAULT_FRAME_RATE = 24.0
 
 # Resolution presets: (width, height)
@@ -76,52 +72,104 @@ RESOLUTIONS = {
 }
 
 # Model repos
-LTX_MODEL_REPO = "Lightricks/LTX-2.3"
+LTX_MODEL_REPO = "diffusers-internal-dev/ltx-23"
 GEMMA_REPO = "google/gemma-3-12b-it-qat-q4_0-unquantized"
 
+# Available IC-LoRAs for LTX-2.3 (22B)
+IC_LORA_OPTIONS = {
+    "Union Control (Depth + Canny)": {
+        "repo": "Lightricks/LTX-2.3-22b-IC-LoRA-Union-Control",
+        "filename": "ltx-2.3-22b-ic-lora-union-control-ref0.5.safetensors",
+    },
+    "Motion Track Control": {
+        "repo": "Lightricks/LTX-2.3-22b-IC-LoRA-Motion-Track-Control",
+        "filename": "ltx-2.3-22b-ic-lora-motion-track-control-ref0.5.safetensors",
+    },
+}
+
 # Download model checkpoints
 print("=" * 80)
-print("Downloading LTX-2.3 distilled model + Gemma...")
+print("Downloading LTX-2.3 distilled model + Gemma + IC-LoRAs...")
 print("=" * 80)
 
 checkpoint_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-22b-distilled.safetensors")
 spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
 gemma_root = snapshot_download(repo_id=GEMMA_REPO)
 
+# Pre-download all IC-LoRA checkpoints
+ic_lora_paths = {}
+for name, info in IC_LORA_OPTIONS.items():
+    path = hf_hub_download(repo_id=info["repo"], filename=info["filename"])
+    ic_lora_paths[name] = path
+    print(f"IC-LoRA '{name}': {path}")
+
 print(f"Checkpoint: {checkpoint_path}")
 print(f"Spatial upsampler: {spatial_upsampler_path}")
 print(f"Gemma root: {gemma_root}")
 
-# Initialize pipeline WITH text encoder
-pipeline = DistilledPipeline(
-    distilled_checkpoint_path=checkpoint_path,
-    spatial_upsampler_path=spatial_upsampler_path,
-    gemma_root=gemma_root,
-    loras=[],
-    quantization=QuantizationPolicy.fp8_cast(),
-)
+# Build initial pipeline with the first IC-LoRA
+default_lora_name = "Union Control (Depth + Canny)"
+default_lora_path = ic_lora_paths[default_lora_name]
+
+current_pipeline = None
+current_lora_name = None
 
-# Preload all models for ZeroGPU tensor packing.
-print("Preloading all models (including Gemma)...")
-ledger = pipeline.model_ledger
-_transformer = ledger.transformer()
-_video_encoder = ledger.video_encoder()
-_video_decoder = ledger.video_decoder()
-_audio_decoder = ledger.audio_decoder()
-_vocoder = ledger.vocoder()
-_spatial_upsampler = ledger.spatial_upsampler()
-_text_encoder = ledger.text_encoder()
-_embeddings_processor = ledger.gemma_embeddings_processor()
-
-ledger.transformer = lambda: _transformer
-ledger.video_encoder = lambda: _video_encoder
-ledger.video_decoder = lambda: _video_decoder
-ledger.audio_decoder = lambda: _audio_decoder
-ledger.vocoder = lambda: _vocoder
-ledger.spatial_upsampler = lambda: _spatial_upsampler
-ledger.text_encoder = lambda: _text_encoder
-ledger.gemma_embeddings_processor = lambda: _embeddings_processor
-print("All models preloaded (including Gemma text encoder)!")
+
+def build_pipeline(lora_name: str) -> ICLoraPipeline:
+    """Build an ICLoraPipeline with the given IC-LoRA."""
+    lora_path = ic_lora_paths[lora_name]
+    lora = LoraPathStrengthAndSDOps(
+        path=lora_path,
+        strength=1.0,
+        sd_ops=LTXV_LORA_COMFY_RENAMING_MAP,
+    )
+    pipe = ICLoraPipeline(
+        distilled_checkpoint_path=checkpoint_path,
+        spatial_upsampler_path=spatial_upsampler_path,
+        gemma_root=gemma_root,
+        loras=[lora],
+        quantization=QuantizationPolicy.fp8_cast(),
+    )
+    return pipe
+
+
+def preload_pipeline(pipe: ICLoraPipeline) -> None:
+    """Preload all models from both ledgers for ZeroGPU tensor packing."""
+    print("Preloading stage 1 models (with IC-LoRA)...")
+    s1 = pipe.stage_1_model_ledger
+    _s1_transformer = s1.transformer()
+    _s1_video_encoder = s1.video_encoder()
+    _s1_text_encoder = s1.text_encoder()
+    _s1_embeddings_processor = s1.gemma_embeddings_processor()
+
+    s1.transformer = lambda: _s1_transformer
+    s1.video_encoder = lambda: _s1_video_encoder
+    s1.text_encoder = lambda: _s1_text_encoder
+    s1.gemma_embeddings_processor = lambda: _s1_embeddings_processor
+
+    print("Preloading stage 2 models (without IC-LoRA)...")
+    s2 = pipe.stage_2_model_ledger
+    _s2_transformer = s2.transformer()
+    _s2_video_encoder = s2.video_encoder()
+    _s2_video_decoder = s2.video_decoder()
+    _s2_audio_decoder = s2.audio_decoder()
+    _s2_vocoder = s2.vocoder()
+    _s2_spatial_upsampler = s2.spatial_upsampler()
+
+    s2.transformer = lambda: _s2_transformer
+    s2.video_encoder = lambda: _s2_video_encoder
+    s2.video_decoder = lambda: _s2_video_decoder
+    s2.audio_decoder = lambda: _s2_audio_decoder
+    s2.vocoder = lambda: _s2_vocoder
+    s2.spatial_upsampler = lambda: _s2_spatial_upsampler
+
+    print("All models preloaded!")
+
+
+print(f"Building initial pipeline with IC-LoRA: {default_lora_name}")
+current_pipeline = build_pipeline(default_lora_name)
+current_lora_name = default_lora_name
+preload_pipeline(current_pipeline)
 
 print("=" * 80)
 print("Pipeline ready!")
@@ -136,14 +184,14 @@ def log_memory(tag: str):
     print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")
 
 
-def detect_aspect_ratio(image) -> str:
-    """Detect the closest aspect ratio (16:9, 9:16, or 1:1) from an image."""
-    if image is None:
+def detect_aspect_ratio(media) -> str:
+    """Detect the closest aspect ratio from an image or video."""
+    if media is None:
         return "16:9"
-    if hasattr(image, "size"):
-        w, h = image.size
-    elif hasattr(image, "shape"):
-        h, w = image.shape[:2]
+    if hasattr(media, "size"):
+        w, h = media.size
+    elif hasattr(media, "shape"):
+        h, w = media.shape[:2]
     else:
         return "16:9"
     ratio = w / h
@@ -151,39 +199,55 @@ def detect_aspect_ratio(image) -> str:
     return min(candidates, key=lambda k: abs(ratio - candidates[k]))
 
 
-def on_image_upload(image, high_res):
-    """Auto-set resolution when image is uploaded."""
-    aspect = detect_aspect_ratio(image)
+def on_media_upload(first_image, last_image, high_res):
+    """Auto-set resolution when media is uploaded."""
+    ref = first_image if first_image is not None else last_image
+    aspect = detect_aspect_ratio(ref)
     tier = "high" if high_res else "low"
     w, h = RESOLUTIONS[tier][aspect]
     return gr.update(value=w), gr.update(value=h)
 
 
-def on_highres_toggle(image, high_res):
+def on_highres_toggle(first_image, last_image, high_res):
     """Update resolution when high-res toggle changes."""
-    aspect = detect_aspect_ratio(image)
+    ref = first_image if first_image is not None else last_image
+    aspect = detect_aspect_ratio(ref)
     tier = "high" if high_res else "low"
     w, h = RESOLUTIONS[tier][aspect]
     return gr.update(value=w), gr.update(value=h)
 
 
-@spaces.GPU(duration=75)
+@spaces.GPU(duration=120)
 @torch.inference_mode()
 def generate_video(
-    input_image,
+    first_image,
+    last_image,
+    conditioning_video,
     prompt: str,
     duration: float,
-    enhance_prompt: bool = True,
-    seed: int = 42,
-    randomize_seed: bool = True,
-    height: int = 1024,
-    width: int = 1536,
+    ic_lora_choice: str,
+    conditioning_strength: float,
+    enhance_prompt: bool,
+    skip_stage_2: bool,
+    seed: int,
+    randomize_seed: bool,
+    height: int,
+    width: int,
     progress=gr.Progress(track_tqdm=True),
 ):
+    global current_pipeline, current_lora_name
+
     try:
         torch.cuda.reset_peak_memory_stats()
         log_memory("start")
 
+        # Rebuild pipeline if IC-LoRA changed
+        if ic_lora_choice != current_lora_name:
+            print(f"Switching IC-LoRA: {current_lora_name} → {ic_lora_choice}")
+            current_pipeline = build_pipeline(ic_lora_choice)
+            current_lora_name = ic_lora_choice
+            preload_pipeline(current_pipeline)
+
         current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
 
         frame_rate = DEFAULT_FRAME_RATE
@@ -191,24 +255,42 @@ def generate_video(
         num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1
 
         print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")
+        print(f"IC-LoRA: {ic_lora_choice}, conditioning_strength: {conditioning_strength}")
+
+        output_dir = Path("outputs")
+        output_dir.mkdir(exist_ok=True)
 
+        # Build image conditionings (first / last frame)
         images = []
-        if input_image is not None:
-            output_dir = Path("outputs")
-            output_dir.mkdir(exist_ok=True)
-            temp_image_path = output_dir / f"temp_input_{current_seed}.jpg"
-            if hasattr(input_image, "save"):
-                input_image.save(temp_image_path)
+        if first_image is not None:
+            temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
+            if hasattr(first_image, "save"):
+                first_image.save(temp_first_path)
             else:
-                temp_image_path = Path(input_image)
-            images = [ImageConditioningInput(path=str(temp_image_path), frame_idx=0, strength=1.0)]
+                temp_first_path = Path(first_image)
+            images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))
+
+        if last_image is not None:
+            temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
+            if hasattr(last_image, "save"):
+                last_image.save(temp_last_path)
+            else:
+                temp_last_path = Path(last_image)
+            images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))
+
+        # Build video conditioning for IC-LoRA (reference video)
+        video_conditioning = []
+        if conditioning_video is not None:
+            video_path = str(conditioning_video)
+            video_conditioning.append((video_path, conditioning_strength))
+            print(f"Video conditioning: {video_path} (strength={conditioning_strength})")
 
         tiling_config = TilingConfig.default()
         video_chunks_number = get_video_chunks_number(num_frames, tiling_config)
 
         log_memory("before pipeline call")
 
-        video, audio = pipeline(
+        video, audio = current_pipeline(
             prompt=prompt,
             seed=current_seed,
             height=int(height),
@@ -216,8 +298,11 @@
             num_frames=num_frames,
             frame_rate=frame_rate,
             images=images,
+            video_conditioning=video_conditioning,
             tiling_config=tiling_config,
             enhance_prompt=enhance_prompt,
+            conditioning_attention_strength=1.0,
+            skip_stage_2=skip_stage_2,
         )
 
         log_memory("after pipeline call")
@@ -234,6 +319,8 @@
         log_memory("after encode_video")
         return str(output_path), current_seed
 
+    except gr.Error:
+        raise
     except Exception as e:
         import traceback
         log_memory("on error")
@@ -241,35 +328,55 @@
         return None, current_seed
 
 
-with gr.Blocks(title="LTX-2.3 Distilled") as demo:
-    gr.Markdown("# LTX-2.3 Distilled (22B): Fast Audio-Video Generation")
+with gr.Blocks(title="LTX-2.3 IC-LoRA") as demo:
+    gr.Markdown("# LTX-2.3 IC-LoRA: Video-to-Video & Image-to-Video Control")
     gr.Markdown(
-        "Fast and high quality video + audio generation"
+        "Video-to-video transformations using IC-LoRA conditioning "
+        "(depth + canny union control, motion tracking). Upload a **conditioning video** "
+        "as the IC-LoRA reference signal, optionally pin first/last frame images, "
+        "and describe the desired output. "
         "[[model]](https://huggingface.co/Lightricks/LTX-2.3) "
         "[[code]](https://github.com/Lightricks/LTX-2)"
     )
 
     with gr.Row():
        with gr.Column():
-            input_image = gr.Image(label="Input Image (Optional)", type="pil")
+            conditioning_video = gr.Video(
+                label="Conditioning Video (IC-LoRA Reference)",
+                sources=["upload"],
+            )
+            with gr.Row():
+                first_image = gr.Image(label="First Frame (Optional)", type="pil")
+                last_image = gr.Image(label="Last Frame (Optional)", type="pil")
             prompt = gr.Textbox(
                 label="Prompt",
-                info="for best results - make it as elaborate as possible",
-                value="Make this image come alive with cinematic motion, smooth animation",
+                info="Describe the desired output; the IC-LoRA controls structure from the reference",
+                value="A cinematic scene with dramatic lighting and rich detail, smooth motion",
                 lines=3,
-                placeholder="Describe the motion and animation you want...",
+                placeholder="Describe the video you want to generate...",
             )
 
             with gr.Row():
                 duration = gr.Slider(label="Duration (seconds)", minimum=1.0, maximum=10.0, value=3.0, step=0.1)
+                ic_lora_choice = gr.Dropdown(
+                    label="IC-LoRA",
+                    choices=list(IC_LORA_OPTIONS.keys()),
+                    value=default_lora_name,
+                )
+
+            with gr.Row():
+                conditioning_strength = gr.Slider(
+                    label="Conditioning Strength", minimum=0.1, maximum=1.0, value=1.0, step=0.05,
+                )
                 with gr.Column():
                     enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
                     high_res = gr.Checkbox(label="High Resolution", value=True)
+                    skip_stage_2 = gr.Checkbox(label="Skip Stage 2 (faster, half res)", value=False)
 
             generate_btn = gr.Button("Generate Video", variant="primary", size="lg")
 
             with gr.Accordion("Advanced Settings", open=False):
-                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=10, step=1)
+                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=42, step=1)
                 randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                 with gr.Row():
                     width = gr.Number(label="Width", value=1536, precision=0)
@@ -278,24 +385,29 @@ with gr.Blocks(title="LTX-2.3 Distilled") as demo:
         with gr.Column():
             output_video = gr.Video(label="Generated Video", autoplay=True)
 
-    # Auto-detect aspect ratio from uploaded image and set resolution
-    input_image.change(
-        fn=on_image_upload,
-        inputs=[input_image, high_res],
+    # Auto-detect aspect ratio from uploaded images
+    first_image.change(
+        fn=on_media_upload,
+        inputs=[first_image, last_image, high_res],
+        outputs=[width, height],
+    )
+    last_image.change(
+        fn=on_media_upload,
+        inputs=[first_image, last_image, high_res],
         outputs=[width, height],
     )
-
-    # Update resolution when high-res toggle changes
     high_res.change(
         fn=on_highres_toggle,
-        inputs=[input_image, high_res],
+        inputs=[first_image, last_image, high_res],
         outputs=[width, height],
     )
 
     generate_btn.click(
         fn=generate_video,
         inputs=[
-            input_image, prompt, duration, enhance_prompt,
+            first_image, last_image, conditioning_video,
+            prompt, duration, ic_lora_choice, conditioning_strength,
+            enhance_prompt, skip_stage_2,
             seed, randomize_seed, height, width,
         ],
         outputs=[output_video, seed],
@@ -307,5 +419,4 @@ css = """
 """
 
 if __name__ == "__main__":
-    demo.launch(theme=gr.themes.Citrus(), css=css)
-
+    demo.launch(theme=gr.themes.Citrus(), css=css)
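
The net effect of the diff: generation now flows through ICLoraPipeline with an IC-LoRA adapter applied at stage 1. A minimal sketch of the new call path, using only names that appear in the diff; it assumes the module-level setup in app.py has already run, and the prompt, reference-video path, and literal values below are hypothetical placeholders:

    # Hypothetical invocation of the new IC-LoRA path (values are placeholders).
    pipe = build_pipeline("Union Control (Depth + Canny)")  # ICLoraPipeline + IC-LoRA weights
    preload_pipeline(pipe)  # pin both stage ledgers for ZeroGPU

    video, audio = pipe(
        prompt="A cinematic scene with dramatic lighting",  # placeholder prompt
        seed=42,
        height=1024,
        width=1536,
        num_frames=73,  # 3 s at 24 fps, rounded to the 8k+1 grid used in generate_video
        frame_rate=24.0,
        images=[],  # optional ImageConditioningInput pins for first/last frame
        video_conditioning=[("reference.mp4", 1.0)],  # (path, strength) placeholder
        tiling_config=TilingConfig.default(),
        enhance_prompt=False,
        conditioning_attention_strength=1.0,
        skip_stage_2=False,
    )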