cpuai committed on
Commit
f23241b
·
verified ·
1 Parent(s): 737cc06

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +208 -56
app.py CHANGED
@@ -1,13 +1,17 @@
1
  import os
2
  import subprocess
3
  import sys
 
4
 
5
  # Disable torch.compile / dynamo before any torch import
6
  os.environ["TORCH_COMPILE_DISABLE"] = "1"
7
  os.environ["TORCHDYNAMO_DISABLE"] = "1"
8
 
9
  # Install xformers for memory-efficient attention
10
- subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)
 
 
 
11
 
12
  # Clone LTX-2 repo and install packages
13
  LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
@@ -19,9 +23,11 @@ if not os.path.exists(LTX_REPO_DIR):
19
 
20
  print("Installing ltx-core and ltx-pipelines from cloned repo...")
21
  subprocess.run(
22
- [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
23
- os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
24
- "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
 
 
25
  check=True,
26
  )
27
 
@@ -61,31 +67,44 @@ except Exception as e:
61
  logging.getLogger().setLevel(logging.INFO)
62
 
63
  MAX_SEED = np.iinfo(np.int32).max
64
- DEFAULT_PROMPT = (
65
- "An astronaut hatches from a fragile egg on the surface of the Moon, "
66
- "the shell cracking and peeling apart in gentle low-gravity motion. "
67
- "Fine lunar dust lifts and drifts outward with each movement, floating "
68
- "in slow arcs before settling back onto the ground."
69
- )
70
  DEFAULT_FRAME_RATE = 24.0
71
 
 
 
 
 
 
 
72
  # Resolution presets: (width, height)
73
  RESOLUTIONS = {
74
- "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024)},
75
- "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768)},
 
 
 
 
 
 
 
 
76
  }
77
 
78
  # Model repos
79
  LTX_MODEL_REPO = "Lightricks/LTX-2.3"
80
  GEMMA_REPO = "google/gemma-3-12b-it-qat-q4_0-unquantized"
81
 
82
- # Download model checkpoints
83
  print("=" * 80)
84
  print("Downloading LTX-2.3 distilled model + Gemma...")
85
  print("=" * 80)
86
 
87
- checkpoint_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-22b-distilled.safetensors")
88
- spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
 
 
 
 
 
 
89
  gemma_root = snapshot_download(repo_id=GEMMA_REPO)
90
 
91
  print(f"Checkpoint: {checkpoint_path}")
@@ -129,45 +148,107 @@ print("=" * 80)
129
 
130
 
131
  def log_memory(tag: str):
 
132
  if torch.cuda.is_available():
133
  allocated = torch.cuda.memory_allocated() / 1024**3
134
  peak = torch.cuda.max_memory_allocated() / 1024**3
135
  free, total = torch.cuda.mem_get_info()
136
- print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")
 
 
 
 
 
 
137
 
138
 
139
  def detect_aspect_ratio(image) -> str:
140
- """Detect the closest aspect ratio (16:9, 9:16, or 1:1) from an image."""
141
  if image is None:
142
  return "16:9"
 
143
  if hasattr(image, "size"):
144
  w, h = image.size
145
  elif hasattr(image, "shape"):
146
  h, w = image.shape[:2]
147
  else:
148
  return "16:9"
 
149
  ratio = w / h
150
- candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
 
 
 
 
151
  return min(candidates, key=lambda k: abs(ratio - candidates[k]))
152
 
153
 
154
- def on_image_upload(image, high_res):
155
- """Auto-set resolution when image is uploaded."""
 
 
 
156
  aspect = detect_aspect_ratio(image)
157
- tier = "high" if high_res else "low"
158
- w, h = RESOLUTIONS[tier][aspect]
159
- return gr.update(value=w), gr.update(value=h)
160
 
 
 
 
 
 
161
 
162
- def on_highres_toggle(image, high_res):
163
- """Update resolution when high-res toggle changes."""
164
- aspect = detect_aspect_ratio(image)
165
- tier = "high" if high_res else "low"
166
  w, h = RESOLUTIONS[tier][aspect]
167
- return gr.update(value=w), gr.update(value=h)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
 
 
 
 
169
 
170
- @spaces.GPU(duration=75)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
171
  @torch.inference_mode()
172
  def generate_video(
173
  input_image,
@@ -178,30 +259,58 @@ def generate_video(
178
  randomize_seed: bool = True,
179
  height: int = 1024,
180
  width: int = 1536,
 
181
  progress=gr.Progress(track_tqdm=True),
182
  ):
 
 
183
  try:
184
  torch.cuda.reset_peak_memory_stats()
185
  log_memory("start")
186
 
187
- current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
188
-
189
  frame_rate = DEFAULT_FRAME_RATE
190
- num_frames = int(duration * frame_rate) + 1
191
- num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1
192
-
193
- print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
 
195
  images = []
196
  if input_image is not None:
197
  output_dir = Path("outputs")
198
  output_dir.mkdir(exist_ok=True)
199
  temp_image_path = output_dir / f"temp_input_{current_seed}.jpg"
 
200
  if hasattr(input_image, "save"):
201
  input_image.save(temp_image_path)
202
  else:
203
  temp_image_path = Path(input_image)
204
- images = [ImageConditioningInput(path=str(temp_image_path), frame_idx=0, strength=1.0)]
 
 
 
 
 
 
 
205
 
206
  tiling_config = TilingConfig.default()
207
  video_chunks_number = get_video_chunks_number(num_frames, tiling_config)
@@ -217,7 +326,7 @@ def generate_video(
217
  frame_rate=frame_rate,
218
  images=images,
219
  tiling_config=tiling_config,
220
- enhance_prompt=enhance_prompt,
221
  )
222
 
223
  log_memory("after pipeline call")
@@ -232,26 +341,43 @@ def generate_video(
232
  )
233
 
234
  log_memory("after encode_video")
235
- return str(output_path), current_seed
 
 
236
 
237
  except Exception as e:
238
  import traceback
239
  log_memory("on error")
240
- print(f"Error: {str(e)}\n{traceback.format_exc()}")
241
- return None, current_seed
 
 
 
 
 
 
 
 
 
 
 
242
 
243
 
244
  with gr.Blocks(title="LTX-2.3 Distilled") as demo:
245
  gr.Markdown("# LTX-2.3 Distilled (22B): Fast Audio-Video Generation")
246
  gr.Markdown(
247
- "Fast and high quality video + audio generation"
248
  "[[model]](https://huggingface.co/Lightricks/LTX-2.3) "
249
  "[[code]](https://github.com/Lightricks/LTX-2)"
250
  )
 
 
 
251
 
252
  with gr.Row():
253
  with gr.Column():
254
  input_image = gr.Image(label="Input Image (Optional)", type="pil")
 
255
  prompt = gr.Textbox(
256
  label="Prompt",
257
  info="for best results - make it as elaborate as possible",
@@ -259,9 +385,15 @@ with gr.Blocks(title="LTX-2.3 Distilled") as demo:
259
  lines=3,
260
  placeholder="Describe the motion and animation you want...",
261
  )
262
-
263
  with gr.Row():
264
- duration = gr.Slider(label="Duration (seconds)", minimum=1.0, maximum=10.0, value=3.0, step=0.1)
 
 
 
 
 
 
265
  with gr.Column():
266
  enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
267
  high_res = gr.Checkbox(label="High Resolution", value=True)
@@ -271,41 +403,61 @@ with gr.Blocks(title="LTX-2.3 Distilled") as demo:
271
  with gr.Accordion("Advanced Settings", open=False):
272
  seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=10, step=1)
273
  randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
 
274
  with gr.Row():
275
  width = gr.Number(label="Width", value=1536, precision=0)
276
  height = gr.Number(label="Height", value=1024, precision=0)
277
 
 
 
 
 
 
 
 
278
  with gr.Column():
279
  output_video = gr.Video(label="Generated Video", autoplay=True)
280
 
281
- # Auto-detect aspect ratio from uploaded image and set resolution
282
  input_image.change(
283
  fn=on_image_upload,
284
- inputs=[input_image, high_res],
285
- outputs=[width, height],
286
  )
287
 
288
- # Update resolution when high-res toggle changes
289
  high_res.change(
290
  fn=on_highres_toggle,
291
- inputs=[input_image, high_res],
292
- outputs=[width, height],
 
 
 
 
 
 
 
293
  )
294
 
295
  generate_btn.click(
296
  fn=generate_video,
297
  inputs=[
298
- input_image, prompt, duration, enhance_prompt,
299
- seed, randomize_seed, height, width,
 
 
 
 
 
 
 
300
  ],
301
- outputs=[output_video, seed],
302
  )
303
 
304
-
305
  css = """
306
- .fillable{max-width: 1200px !important}
307
  """
308
 
309
  if __name__ == "__main__":
310
- demo.launch(theme=gr.themes.Citrus(), css=css)
311
-
 
1
  import os
2
  import subprocess
3
  import sys
4
+ import math
5
 
6
  # Disable torch.compile / dynamo before any torch import
7
  os.environ["TORCH_COMPILE_DISABLE"] = "1"
8
  os.environ["TORCHDYNAMO_DISABLE"] = "1"
9
 
10
  # Install xformers for memory-efficient attention
11
+ subprocess.run(
12
+ [sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"],
13
+ check=False
14
+ )
15
 
16
  # Clone LTX-2 repo and install packages
17
  LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
 
23
 
24
  print("Installing ltx-core and ltx-pipelines from cloned repo...")
25
  subprocess.run(
26
+ [
27
+ sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
28
+ os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
29
+ "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")
30
+ ],
31
  check=True,
32
  )
33
 
 
67
  logging.getLogger().setLevel(logging.INFO)
68
 
69
  MAX_SEED = np.iinfo(np.int32).max
 
 
 
 
 
 
70
  DEFAULT_FRAME_RATE = 24.0
71
 
72
+ # LTX-2.3 官方单次最长 20 秒
73
+ MAX_DURATION_SECONDS = 20.0
74
+
75
+ # 为了降低 20 秒长视频时的失败率,做一个长视频阈值
76
+ LONG_VIDEO_THRESHOLD = 10.0
77
+
78
  # Resolution presets: (width, height)
79
  RESOLUTIONS = {
80
+ "high": {
81
+ "16:9": (1536, 1024),
82
+ "9:16": (1024, 1536),
83
+ "1:1": (1024, 1024),
84
+ },
85
+ "low": {
86
+ "16:9": (768, 512),
87
+ "9:16": (512, 768),
88
+ "1:1": (768, 768),
89
+ },
90
  }
91
 
92
  # Model repos
93
  LTX_MODEL_REPO = "Lightricks/LTX-2.3"
94
  GEMMA_REPO = "google/gemma-3-12b-it-qat-q4_0-unquantized"
95
 
 
96
  print("=" * 80)
97
  print("Downloading LTX-2.3 distilled model + Gemma...")
98
  print("=" * 80)
99
 
100
+ checkpoint_path = hf_hub_download(
101
+ repo_id=LTX_MODEL_REPO,
102
+ filename="ltx-2.3-22b-distilled.safetensors"
103
+ )
104
+ spatial_upsampler_path = hf_hub_download(
105
+ repo_id=LTX_MODEL_REPO,
106
+ filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors"
107
+ )
108
  gemma_root = snapshot_download(repo_id=GEMMA_REPO)
109
 
110
  print(f"Checkpoint: {checkpoint_path}")
 
148
 
149
 
150
def log_memory(tag: str):
    """Print current CUDA memory statistics to stdout, labelled with *tag*.

    No-op when CUDA is unavailable. Useful for diagnosing OOM issues
    while generating long videos.
    """
    if not torch.cuda.is_available():
        return
    allocated = torch.cuda.memory_allocated() / 1024**3
    peak = torch.cuda.max_memory_allocated() / 1024**3
    free, total = torch.cuda.mem_get_info()
    fields = [
        f"[VRAM {tag}]",
        f"allocated={allocated:.2f}GB",
        f"peak={peak:.2f}GB",
        f"free={free / 1024**3:.2f}GB",
        f"total={total / 1024**3:.2f}GB",
    ]
    print(" ".join(fields))
163
 
164
 
165
def detect_aspect_ratio(image) -> str:
    """Return the closest supported aspect ratio for *image*.

    Supported ratios are "16:9", "9:16", and "1:1"; "16:9" is the
    fallback for None or unrecognized inputs.

    Accepts either a PIL-style image (``.size`` is a ``(w, h)`` tuple)
    or an array-style image (``.shape`` is ``(h, w, ...)``).
    """
    if image is None:
        return "16:9"

    # Check .shape BEFORE .size: numpy arrays expose both, and their
    # .size is a scalar element count (torch tensors' .size is a method),
    # so unpacking `w, h = image.size` would raise for array inputs.
    if hasattr(image, "shape"):
        h, w = image.shape[:2]
    elif hasattr(image, "size"):
        w, h = image.size
    else:
        return "16:9"

    ratio = w / h
    candidates = {
        "16:9": 16 / 9,
        "9:16": 9 / 16,
        "1:1": 1.0,
    }
    # Pick the preset whose numeric ratio is nearest to the image's ratio.
    return min(candidates, key=lambda k: abs(ratio - candidates[k]))
184
 
185
 
186
def get_resolution_by_state(image, high_res: bool, duration: float):
    """Pick the recommended resolution for the current UI state.

    Returns ``(width, height, tier, aspect)`` derived from the image's
    aspect ratio, the high-res toggle, and the clip duration. Long clips
    (duration above LONG_VIDEO_THRESHOLD) are forced onto the "low"
    preset regardless of the toggle, which markedly reduces OOM and
    timeout failures for 20-second videos.
    """
    aspect = detect_aspect_ratio(image)
    # Long videos always use the low tier; otherwise honor the toggle.
    tier = "low" if (duration > LONG_VIDEO_THRESHOLD or not high_res) else "high"
    width, height = RESOLUTIONS[tier][aspect]
    return width, height, tier, aspect
201
+
202
+
203
def on_image_upload(image, high_res, duration):
    """Gradio callback: sync width/height and the status tip after an image upload."""
    w, h, tier, aspect = get_resolution_by_state(
        image, bool(high_res), float(duration)
    )
    tip = f"已自动匹配比例 {aspect},当前使用 {tier} 分辨率:{w}×{h}"
    width_update = gr.update(value=w)
    height_update = gr.update(value=h)
    return width_update, height_update, gr.update(value=tip)
208
+
209
+
210
def on_highres_toggle(image, high_res, duration):
    """Gradio callback: re-derive width/height and the status tip when the high-res toggle flips."""
    wants_high = bool(high_res)
    seconds = float(duration)
    w, h, tier, aspect = get_resolution_by_state(image, wants_high, seconds)
    # Tell the user explicitly when a long clip overrides their high-res choice.
    downgraded = seconds > LONG_VIDEO_THRESHOLD and wants_high
    tip = (
        f"当前时长 {duration:.1f}s,已为稳定性自动降为 low 分辨率:{w}×{h}"
        if downgraded
        else f"已自动匹配比例 {aspect},当前使用 {tier} 分辨率:{w}×{h}"
    )
    return gr.update(value=w), gr.update(value=h), gr.update(value=tip)
218
+
219
+
220
def on_duration_change(image, high_res, duration):
    """Gradio callback: re-apply the resolution policy whenever the duration slider moves."""
    seconds = float(duration)
    w, h, tier, aspect = get_resolution_by_state(image, bool(high_res), seconds)
    is_long = seconds > LONG_VIDEO_THRESHOLD
    if is_long:
        # Long clip: explain that the low preset was auto-selected.
        tip = (
            f"当前时长 {duration:.1f}s,已自动切换到 low 分辨率 {w}×{h},"
            f"以降低显存占用和超时风险。"
        )
    else:
        tip = f"当前时长 {duration:.1f}s,比例 {aspect},使用 {tier} 分辨率:{w}×{h}"
    return gr.update(value=w), gr.update(value=h), gr.update(value=tip)
231
+
232
 
233
def clamp_int(v, min_v, max_v):
    """Coerce *v* to int and clamp the result into [min_v, max_v]."""
    value = int(v)
    if value < min_v:
        return min_v
    if value > max_v:
        return max_v
    return value
236
 
237
+
238
def align_num_frames(duration: float, frame_rate: float) -> int:
    """Return the frame count for *duration* seconds, aligned to the 8n+1 grid.

    LTX expects frame counts of the form ``8 * n + 1``; the requested
    count is rounded up to the next valid value. For example, 20 s at
    24 fps requests 480 frames and aligns to 481.
    """
    requested = int(duration * frame_rate)
    # Integer ceiling of requested/8, then back onto the 8n+1 grid.
    aligned = -(-requested // 8) * 8
    return aligned + 1
248
+
249
+
250
+ # 20 秒视频推理时间明显更长,因此把 GPU duration 提高
251
+ @spaces.GPU(duration=240)
252
  @torch.inference_mode()
253
  def generate_video(
254
  input_image,
 
259
  randomize_seed: bool = True,
260
  height: int = 1024,
261
  width: int = 1536,
262
+ high_res: bool = True,
263
  progress=gr.Progress(track_tqdm=True),
264
  ):
265
+ current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
266
+
267
  try:
268
  torch.cuda.reset_peak_memory_stats()
269
  log_memory("start")
270
 
271
+ # ---------- 参数安全限制 ----------
272
+ duration = max(1.0, min(float(duration), MAX_DURATION_SECONDS))
273
  frame_rate = DEFAULT_FRAME_RATE
274
+ num_frames = align_num_frames(duration, frame_rate)
275
+
276
+ # 宽高做整数与边界保护
277
+ width = clamp_int(width, 256, 2048)
278
+ height = clamp_int(height, 256, 2048)
279
+
280
+ # 长视频时自动降级分辨率,提高成功率
281
+ safe_w, safe_h, safe_tier, safe_aspect = get_resolution_by_state(input_image, bool(high_res), duration)
282
+ if duration > LONG_VIDEO_THRESHOLD:
283
+ if width != safe_w or height != safe_h:
284
+ print(
285
+ f"[SAFE] Long video detected ({duration:.1f}s). "
286
+ f"Override resolution from {width}x{height} to {safe_w}x{safe_h}"
287
+ )
288
+ width, height = safe_w, safe_h
289
+
290
+ print(
291
+ f"Generating: {height}x{width}, "
292
+ f"{num_frames} frames ({duration:.1f}s), "
293
+ f"seed={current_seed}, high_res={high_res}, safe_tier={safe_tier}"
294
+ )
295
 
296
  images = []
297
  if input_image is not None:
298
  output_dir = Path("outputs")
299
  output_dir.mkdir(exist_ok=True)
300
  temp_image_path = output_dir / f"temp_input_{current_seed}.jpg"
301
+
302
  if hasattr(input_image, "save"):
303
  input_image.save(temp_image_path)
304
  else:
305
  temp_image_path = Path(input_image)
306
+
307
+ images = [
308
+ ImageConditioningInput(
309
+ path=str(temp_image_path),
310
+ frame_idx=0,
311
+ strength=1.0
312
+ )
313
+ ]
314
 
315
  tiling_config = TilingConfig.default()
316
  video_chunks_number = get_video_chunks_number(num_frames, tiling_config)
 
326
  frame_rate=frame_rate,
327
  images=images,
328
  tiling_config=tiling_config,
329
+ enhance_prompt=bool(enhance_prompt),
330
  )
331
 
332
  log_memory("after pipeline call")
 
341
  )
342
 
343
  log_memory("after encode_video")
344
+ return str(output_path), current_seed, (
345
+ f"生成成功:{duration:.1f} 秒,{num_frames} 帧,输出分辨率 {width}×{height}"
346
+ )
347
 
348
  except Exception as e:
349
  import traceback
350
  log_memory("on error")
351
+ err = f"{type(e).__name__}: {str(e)}"
352
+ print(f"Error: {err}\n{traceback.format_exc()}")
353
+
354
+ user_msg = (
355
+ "生成失败。\n"
356
+ f"错误:{err}\n\n"
357
+ "建议:\n"
358
+ "1. 20秒视频请优先使用低分辨率\n"
359
+ "2. 先关闭 High Resolution\n"
360
+ "3. 输入图尽量简单,减少复杂运动\n"
361
+ "4. 如在 ZeroGPU / Hugging Face Space 上运行,长视频可能仍会因排队或时限失败"
362
+ )
363
+ return None, current_seed, user_msg
364
 
365
 
366
  with gr.Blocks(title="LTX-2.3 Distilled") as demo:
367
  gr.Markdown("# LTX-2.3 Distilled (22B): Fast Audio-Video Generation")
368
  gr.Markdown(
369
+ "Fast and high quality video + audio generation \n"
370
  "[[model]](https://huggingface.co/Lightricks/LTX-2.3) "
371
  "[[code]](https://github.com/Lightricks/LTX-2)"
372
  )
373
+ gr.Markdown(
374
+ "说明:已支持最长 20 秒视频。为提高成功率,超过 10 秒时会自动切换为低分辨率。"
375
+ )
376
 
377
  with gr.Row():
378
  with gr.Column():
379
  input_image = gr.Image(label="Input Image (Optional)", type="pil")
380
+
381
  prompt = gr.Textbox(
382
  label="Prompt",
383
  info="for best results - make it as elaborate as possible",
 
385
  lines=3,
386
  placeholder="Describe the motion and animation you want...",
387
  )
388
+
389
  with gr.Row():
390
+ duration = gr.Slider(
391
+ label="Duration (seconds)",
392
+ minimum=1.0,
393
+ maximum=20.0, # 改为 20 秒
394
+ value=3.0,
395
+ step=0.1
396
+ )
397
  with gr.Column():
398
  enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
399
  high_res = gr.Checkbox(label="High Resolution", value=True)
 
403
  with gr.Accordion("Advanced Settings", open=False):
404
  seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=10, step=1)
405
  randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
406
+
407
  with gr.Row():
408
  width = gr.Number(label="Width", value=1536, precision=0)
409
  height = gr.Number(label="Height", value=1024, precision=0)
410
 
411
+ status_text = gr.Textbox(
412
+ label="Status",
413
+ value="就绪",
414
+ interactive=False,
415
+ lines=4
416
+ )
417
+
418
  with gr.Column():
419
  output_video = gr.Video(label="Generated Video", autoplay=True)
420
 
421
+ # 上传图片时自动调整
422
  input_image.change(
423
  fn=on_image_upload,
424
+ inputs=[input_image, high_res, duration],
425
+ outputs=[width, height, status_text],
426
  )
427
 
428
+ # 切换高分辨率时自动调整
429
  high_res.change(
430
  fn=on_highres_toggle,
431
+ inputs=[input_image, high_res, duration],
432
+ outputs=[width, height, status_text],
433
+ )
434
+
435
+ # 切换时长时自动调整
436
+ duration.change(
437
+ fn=on_duration_change,
438
+ inputs=[input_image, high_res, duration],
439
+ outputs=[width, height, status_text],
440
  )
441
 
442
  generate_btn.click(
443
  fn=generate_video,
444
  inputs=[
445
+ input_image,
446
+ prompt,
447
+ duration,
448
+ enhance_prompt,
449
+ seed,
450
+ randomize_seed,
451
+ height,
452
+ width,
453
+ high_res,
454
  ],
455
+ outputs=[output_video, seed, status_text],
456
  )
457
 
 
458
  css = """
459
+ .fillable {max-width: 1200px !important;}
460
  """
461
 
462
  if __name__ == "__main__":
463
+ demo.launch(theme=gr.themes.Citrus(), css=css)