Fabrice-TIERCELIN committed on
Commit d0d4fd0 · verified · 1 parent: 063a766
Files changed (1):
  1. app_v2v.py +90 -95
app_v2v.py CHANGED
@@ -46,48 +46,48 @@ from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, Hunyuan
 if torch.cuda.device_count() > 0:
     free_mem_gb = get_cuda_free_memory_gb(gpu)
     high_vram = free_mem_gb > 60
-
+
     print(f'Free VRAM {free_mem_gb} GB')
     print(f'High-VRAM Mode: {high_vram}')
-
-
-
+
+
+
 text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
 text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
 tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
 tokenizer_2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer_2')
 vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()
-
+
 feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
 image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()
-
+
 transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePack_F1_I2V_HY_20250503', torch_dtype=torch.bfloat16).cpu()
-
+
 vae.eval()
 text_encoder.eval()
 text_encoder_2.eval()
 image_encoder.eval()
 transformer.eval()
-
+
 if not high_vram:
     vae.enable_slicing()
     vae.enable_tiling()
-
+
 transformer.high_quality_fp32_output_for_inference = True
 print('transformer.high_quality_fp32_output_for_inference = True')
-
+
 transformer.to(dtype=torch.bfloat16)
 vae.to(dtype=torch.float16)
 image_encoder.to(dtype=torch.float16)
 text_encoder.to(dtype=torch.float16)
 text_encoder_2.to(dtype=torch.float16)
-
+
 vae.requires_grad_(False)
 text_encoder.requires_grad_(False)
 text_encoder_2.requires_grad_(False)
 image_encoder.requires_grad_(False)
 transformer.requires_grad_(False)
-
+
 if not high_vram:
     # DynamicSwapInstaller is the same as huggingface's enable_sequential_offload but 3x faster
     DynamicSwapInstaller.install_model(transformer, device=gpu)
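Review note: the high_vram gate above keys the whole offloading strategy off free VRAM measured at startup. get_cuda_free_memory_gb comes from the FramePack utils; a minimal sketch of the same check with plain PyTorch (the helper name below is hypothetical):

    import torch

    def free_vram_gb(device_index: int = 0) -> float:
        # mem_get_info returns (free_bytes, total_bytes) for the device
        free_bytes, _total_bytes = torch.cuda.mem_get_info(device_index)
        return free_bytes / (1024 ** 3)

    if torch.cuda.is_available():
        high_vram = free_vram_gb(0) > 60  # same 60 GB threshold as above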
@@ -104,23 +104,21 @@ stream = AsyncStream()
 outputs_folder = './outputs/'
 os.makedirs(outputs_folder, exist_ok=True)
 
-input_video_debug_value = None
-prompt_debug_value = None
-total_second_length_debug_value = None
+input_video_debug_value = prompt_debug_value = total_second_length_debug_value = None
 
 @spaces.GPU()
 @torch.no_grad()
 def video_encode(video_path, resolution, no_resize, vae, vae_batch_size=16, device="cuda", width=None, height=None):
     """
     Encode a video into latent representations using the VAE.
-
+
     Args:
         video_path: Path to the input video file.
         vae: AutoencoderKLHunyuanVideo model.
         height, width: Target resolution for resizing frames.
         vae_batch_size: Number of frames to process per batch.
         device: Device for computation (e.g., "cuda").
-
+
     Returns:
         start_latent: Latent of the first frame (for compatibility with original code).
         input_image_np: First frame as numpy array (for CLIP vision encoding).
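Review note: video_encode walks the frame tensor in vae_batch_size chunks so peak VRAM stays bounded. A minimal sketch of that batching pattern, assuming a diffusers-style VAE whose encode() returns a latent distribution (the helper name is illustrative, not the app's exact function):

    import torch

    @torch.no_grad()
    def encode_in_batches(frames_pt, vae, vae_batch_size=16, device="cuda"):
        # frames_pt: (1, channels, num_frames, height, width), already normalized
        latents = []
        for i in range(0, frames_pt.shape[2], vae_batch_size):
            chunk = frames_pt[:, :, i:i + vae_batch_size].to(device, dtype=vae.dtype)
            # diffusers VAEs expose encode(...).latent_dist; sample() draws the latent
            latent = vae.encode(chunk).latent_dist.sample()
            latents.append(latent.cpu())
        return torch.cat(latents, dim=2)  # (1, c, latent_frames, h//8, w//8)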
@@ -159,11 +157,11 @@ def video_encode(video_path, resolution, no_resize, vae, vae_batch_size=16, devi
     # 20250506 pftq: Get native video resolution
     native_height, native_width = frames.shape[1], frames.shape[2]
     print(f"Native video resolution: {native_width}x{native_height}")
-
+
     # 20250506 pftq: Use native resolution if height/width not specified, otherwise use provided values
     target_height = native_height if height is None else height
     target_width = native_width if width is None else width
-
+
     # 20250506 pftq: Adjust to nearest bucket for model compatibility
     if not no_resize:
         target_height, target_width = find_nearest_bucket(target_height, target_width, resolution=resolution)
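Review note: find_nearest_bucket (from the FramePack utils) snaps the target size to a model-friendly resolution bucket. A rough sketch of the idea, assuming buckets are multiples of 32 scaled by the resolution slider; the real bucket table may differ:

    def nearest_bucket(height, width, resolution=640, multiple=32):
        # Scale the longer side to `resolution`, keep aspect ratio,
        # then round both sides to the nearest multiple of `multiple`.
        scale = resolution / max(height, width)
        h = max(multiple, round(height * scale / multiple) * multiple)
        w = max(multiple, round(width * scale / multiple) * multiple)
        return h, w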
@@ -190,7 +188,7 @@ def video_encode(video_path, resolution, no_resize, vae, vae_batch_size=16, devi
     frames_pt = frames_pt.unsqueeze(0)  # Shape: (1, num_real_frames, channels, height, width)
     frames_pt = frames_pt.permute(0, 2, 1, 3, 4)  # Shape: (1, channels, num_real_frames, height, width)
     print(f"Tensor shape: {frames_pt.shape}")
-
+
     # 20250507 pftq: Save pixel frames for use in worker
     input_video_pixels = frames_pt.cpu()
 
@@ -229,7 +227,7 @@ def video_encode(video_path, resolution, no_resize, vae, vae_batch_size=16, devi
        if device == "cuda" and "out of memory" in str(e).lower():
            print("CUDA out of memory, try reducing vae_batch_size or using CPU")
        raise
-
+
    # 20250506 pftq: Concatenate latents
    print("Concatenating latents...")
    history_latents = torch.cat(latents, dim=2)  # Shape: (1, channels, frames, height//8, width//8)
@@ -256,15 +254,15 @@ def set_mp4_comments_imageio_ffmpeg(input_file, comments):
     try:
         # Get the path to the bundled FFmpeg binary from imageio-ffmpeg
         ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()
-
+
         # Check if input file exists
         if not os.path.exists(input_file):
             print(f"Error: Input file {input_file} does not exist")
             return False
-
+
         # Create a temporary file path
         temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
-
+
         # FFmpeg command using the bundled binary
         command = [
             ffmpeg_path,  # Use imageio-ffmpeg's FFmpeg
@@ -275,10 +273,10 @@ def set_mp4_comments_imageio_ffmpeg(input_file, comments):
             '-y',  # overwrite output file if it exists
             temp_file  # temporary output file
         ]
-
+
         # Run the FFmpeg command
         result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
-
+
         if result.returncode == 0:
             # Replace the original file with the modified one
             shutil.move(temp_file, input_file)
@@ -290,7 +288,7 @@ def set_mp4_comments_imageio_ffmpeg(input_file, comments):
                os.remove(temp_file)
            print(f"Error: FFmpeg failed with message:\n{result.stderr}")
            return False
-
+
    except Exception as e:
        # Clean up temp file in case of other errors
        if 'temp_file' in locals() and os.path.exists(temp_file):
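Review note: the function above shells out to the FFmpeg binary bundled with imageio-ffmpeg to stamp a comment into the MP4 container without re-encoding. The core invocation, reduced to its essentials (helper name is illustrative):

    import subprocess
    import imageio_ffmpeg

    def write_mp4_comment(path_in, path_out, comment):
        ffmpeg = imageio_ffmpeg.get_ffmpeg_exe()  # bundled FFmpeg binary
        subprocess.run(
            [ffmpeg, '-i', path_in,
             '-metadata', f'comment={comment}',
             '-c', 'copy',          # copy streams, no re-encode
             '-y', path_out],
            check=True, capture_output=True, text=True)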
@@ -301,8 +299,8 @@ def set_mp4_comments_imageio_ffmpeg(input_file, comments):
 # 20250506 pftq: Modified worker to accept video input and clean frame count
 @spaces.GPU()
 @torch.no_grad()
-def worker(input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
-
+def worker_video(input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
+
     stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
 
     try:
@@ -338,7 +336,7 @@ def worker(input_video, prompt, n_prompt, seed, batch, resolution, total_second_
        #start_latent, input_image_np, history_latents, fps = video_encode(input_video, vae, height, width, vae_batch_size=16, device=gpu)
        start_latent, input_image_np, video_latents, fps, height, width, input_video_pixels = video_encode(input_video, resolution, no_resize, vae, vae_batch_size=vae_batch, device=gpu)
 
-        #Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
+        #Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
 
        # CLIP Vision
        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
@@ -362,60 +360,60 @@ def worker(input_video, prompt, n_prompt, seed, batch, resolution, total_second_
        for idx in range(batch):
            if batch > 1:
                print(f"Beginning video {idx+1} of {batch} with seed {seed} ")
-
+
            #job_id = generate_timestamp()
            job_id = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+f"_framepackf1-videoinput_{width}-{total_second_length}sec_seed-{seed}_steps-{steps}_distilled-{gs}_cfg-{cfg}"  # 20250506 pftq: easier to read timestamp and filename
-
+
            # Sampling
            stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
-
+
            rnd = torch.Generator("cpu").manual_seed(seed)
-
+
            # 20250506 pftq: Initialize history_latents with video latents
            history_latents = video_latents.cpu()
            total_generated_latent_frames = history_latents.shape[2]
            # 20250506 pftq: Initialize history_pixels to fix UnboundLocalError
            history_pixels = None
            previous_video = None
-
+
            # 20250507 pftq: hot fix for initial video being corrupted by vae encoding, issue with ghosting because of slight differences
-            #history_pixels = input_video_pixels
+            #history_pixels = input_video_pixels
            #save_bcthw_as_mp4(vae_decode(video_latents, vae).cpu(), os.path.join(outputs_folder, f'{job_id}_input_video.mp4'), fps=fps, crf=mp4_crf)  # 20250507 pftq: test fast movement corrupted by vae encoding if vae batch size too low
-
+
            for section_index in range(total_latent_sections):
                if stream.input_queue.top() == 'end':
                    stream.output_queue.push(('end', None))
                    return
-
+
                print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')
-
+
                if not high_vram:
                    unload_complete_models()
                    move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)
-
+
                if use_teacache:
                    transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
                else:
                    transformer.initialize_teacache(enable_teacache=False)
-
+
                def callback(d):
                    preview = d['denoised']
                    preview = vae_decode_fake(preview)
-
+
                    preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
                    preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
-
+
                    if stream.input_queue.top() == 'end':
                        stream.output_queue.push(('end', None))
                        raise KeyboardInterrupt('User ends the task.')
-
+
                    current_step = d['i'] + 1
                    percentage = int(100.0 * current_step / steps)
                    hint = f'Sampling {current_step}/{steps}'
                    desc = f'Total frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / fps) :.2f} seconds (FPS-{fps}), Seed: {seed}, Video {idx+1} of {batch}. The video is generating part {section_index+1} of {total_latent_sections}...'
                    stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
                    return
-
+
                # 20250506 pftq: Use user-specified number of context frames, matching original allocation for num_clean_frames=2
                available_frames = history_latents.shape[2]  # Number of latent frames
                max_pixel_frames = min(latent_window_size * 4 - 3, available_frames * 4)  # Cap at available pixel frames
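Review note: the progress callback above flattens the denoised latent preview into one image grid with einops, laying time out along the width axis. A tiny worked example of that rearrange pattern:

    import torch
    import einops

    frames = torch.rand(1, 3, 4, 64, 64)  # (batch, channels, time, height, width)
    grid = einops.rearrange(frames, 'b c t h w -> (b h) (t w) c')
    print(grid.shape)  # torch.Size([64, 256, 3]) -- 4 frames side by side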
@@ -425,7 +423,7 @@ def worker(input_video, prompt, n_prompt, seed, batch, resolution, total_second_
                effective_clean_frames = min(effective_clean_frames, available_frames - 2) if available_frames > 2 else 0  # 20250507 pftq: changed 1 to 2 for edge case for <=1 sec videos
                num_2x_frames = min(2, max(1, available_frames - effective_clean_frames - 1)) if available_frames > effective_clean_frames + 1 else 0  # 20250507 pftq: subtracted 1 for edge case for <=1 sec videos
                num_4x_frames = min(16, max(1, available_frames - effective_clean_frames - num_2x_frames)) if available_frames > effective_clean_frames + num_2x_frames else 0  # 20250507 pftq: Edge case for <=1 sec
-
+
                total_context_frames = num_4x_frames + num_2x_frames + effective_clean_frames
                total_context_frames = min(total_context_frames, available_frames)  # 20250507 pftq: Edge case for <=1 sec videos
 
@@ -434,12 +432,12 @@ def worker(input_video, prompt, n_prompt, seed, batch, resolution, total_second_
                    [1, num_4x_frames, num_2x_frames, effective_clean_frames, adjusted_latent_frames], dim=1  # 20250507 pftq: latent_window_size to adjusted_latent_frames for edge case for <=1 sec videos
                )
                clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
-
+
                # 20250506 pftq: Split history_latents dynamically based on available frames
                fallback_frame_count = 2  # 20250507 pftq: Changed 0 to 2, edge case for <=1 sec videos
-                context_frames = history_latents[:, :, -total_context_frames:, :, :] if total_context_frames > 0 else history_latents[:, :, :fallback_frame_count, :, :]
+                context_frames = history_latents[:, :, -total_context_frames:, :, :] if total_context_frames > 0 else history_latents[:, :, :fallback_frame_count, :, :]
                if total_context_frames > 0:
-                    split_sizes = [num_4x_frames, num_2x_frames, effective_clean_frames]
+                    split_sizes = [num_4x_frames, num_2x_frames, effective_clean_frames]
                    split_sizes = [s for s in split_sizes if s > 0]  # Remove zero sizes
                    if split_sizes:
                        splits = context_frames.split(split_sizes, dim=2)
@@ -452,12 +450,12 @@ def worker(input_video, prompt, n_prompt, seed, batch, resolution, total_second_
                        if clean_latents_2x.shape[2] < 2:  # 20250507 pftq: edge case for <=1 sec videos
                            clean_latents_2x = torch.cat([clean_latents_2x, clean_latents_2x[:, :, -1:, :, :]], dim=2)[:, :, :2, :, :]
                        split_idx += 1 if num_2x_frames > 0 else 0
-                        clean_latents_1x = splits[split_idx] if effective_clean_frames > 0 and split_idx < len(splits) else history_latents[:, :, :fallback_frame_count, :, :]
+                        clean_latents_1x = splits[split_idx] if effective_clean_frames > 0 and split_idx < len(splits) else history_latents[:, :, :fallback_frame_count, :, :]
                    else:
-                        clean_latents_4x = clean_latents_2x = clean_latents_1x = history_latents[:, :, :fallback_frame_count, :, :]
+                        clean_latents_4x = clean_latents_2x = clean_latents_1x = history_latents[:, :, :fallback_frame_count, :, :]
                else:
-                    clean_latents_4x = clean_latents_2x = clean_latents_1x = history_latents[:, :, :fallback_frame_count, :, :]
-
+                    clean_latents_4x = clean_latents_2x = clean_latents_1x = history_latents[:, :, :fallback_frame_count, :, :]
+
                clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)
 
                # 20250507 pftq: Fix for <=1 sec videos.
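Review note: the 4x/2x/1x context groups above are carved out of the tail of the latent history with a single split along the time dimension. A minimal illustration of that split, with hypothetical sizes:

    import torch

    history = torch.rand(1, 16, 21, 30, 40)   # (b, c, latent_frames, h, w)
    num_4x, num_2x, num_1x = 16, 2, 3         # context budget per group
    context = history[:, :, -(num_4x + num_2x + num_1x):]
    lat_4x, lat_2x, lat_1x = context.split([num_4x, num_2x, num_1x], dim=2)
    print(lat_4x.shape[2], lat_2x.shape[2], lat_1x.shape[2])  # 16 2 3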
@@ -492,42 +490,42 @@ def worker(input_video, prompt, n_prompt, seed, batch, resolution, total_second_
                    clean_latent_4x_indices=clean_latent_4x_indices,
                    callback=callback,
                )
-
+
                total_generated_latent_frames += int(generated_latents.shape[2])
                history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
-
+
                if not high_vram:
                    offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
                    load_model_as_complete(vae, target_device=gpu)
-
+
                real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]
-
+
                if history_pixels is None:
                    history_pixels = vae_decode(real_history_latents, vae).cpu()
                else:
                    section_latent_frames = latent_window_size * 2
                    overlapped_frames = min(latent_window_size * 4 - 3, history_pixels.shape[2])
-
-                    #if section_index == 0:
+
+                    #if section_index == 0:
                    #extra_latents = 1  # Add up to 2 extra latent frames for smoother overlap to initial video
                    #extra_pixel_frames = extra_latents * 4  # Approx. 4 pixel frames per latent
                    #overlapped_frames = min(overlapped_frames + extra_pixel_frames, history_pixels.shape[2], section_latent_frames * 4)
 
                    current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()
                    history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)
-
+
                if not high_vram:
                    unload_complete_models()
-
+
                output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
-
+
                # 20250506 pftq: Use input video FPS for output
                save_bcthw_as_mp4(history_pixels, output_filename, fps=fps, crf=mp4_crf)
                print(f"Latest video saved: {output_filename}")
                # 20250508 pftq: Save prompt to mp4 metadata comments
                set_mp4_comments_imageio_ffmpeg(output_filename, f"Prompt: {prompt} | Negative Prompt: {n_prompt}")
                print(f"Prompt saved to mp4 metadata comments: {output_filename}")
-
+
                # 20250506 pftq: Clean up previous partial files
                if previous_video is not None and os.path.exists(previous_video):
                    try:
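Review note: soft_append_bcthw (from the FramePack utils) blends the tail of the already-decoded pixels with the head of the newly decoded section across `overlapped_frames` to hide the seam. One plausible linear-crossfade implementation, for intuition only (not the library's exact code):

    import torch

    def soft_append_bcthw_sketch(history, current, overlap):
        # history, current: (b, c, t, h, w); blend `overlap` frames linearly
        if overlap <= 0:
            return torch.cat([history, current], dim=2)
        w = torch.linspace(0, 1, overlap, device=history.device).view(1, 1, -1, 1, 1)
        blended = history[:, :, -overlap:] * (1 - w) + current[:, :, :overlap] * w
        return torch.cat([history[:, :, :-overlap], blended, current[:, :, overlap:]], dim=2)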
@@ -536,9 +534,9 @@ def worker(input_video, prompt, n_prompt, seed, batch, resolution, total_second_
                    except Exception as e:
                        print(f"Error deleting previous partial video {previous_video}: {e}")
                previous_video = output_filename
-
+
                print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
-
+
                stream.output_queue.push(('file', output_filename))
 
            seed = (seed + 1) % np.iinfo(np.int32).max
@@ -553,16 +551,16 @@ def worker(input_video, prompt, n_prompt, seed, batch, resolution, total_second_
 
    stream.output_queue.push(('end', None))
    return
-
-def get_duration(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
+
+def get_duration_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
    global total_second_length_debug_value
    if total_second_length_debug_value is not None:
-        return min(total_second_length_debug_value * 60 * 2, 600)
-    return total_second_length * 60 * 2
+        return min(total_second_length_debug_value * 60 * 10, 600)
+    return total_second_length * 60 * 10
 
 # 20250506 pftq: Modified process to pass clean frame count, etc from video_encode
-@spaces.GPU(duration=get_duration)
-def process(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
+@spaces.GPU(duration=get_duration_video)
+def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
    global stream, high_vram, input_video_debug_value, prompt_debug_value, total_second_length_debug_value
 
    if torch.cuda.device_count() == 0:
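Review note: @spaces.GPU accepts a callable for duration, which ZeroGPU invokes with the same arguments as the decorated function; this commit raises the GPU-time budget from 2 to 10 minutes per requested output second. The pattern, reduced to a hypothetical two-argument sketch (the 600 s cap mirrors the debug branch above):

    import spaces

    def estimate_duration(video_path, seconds_to_generate):
        # GPU-seconds budget: 10 minutes per generated second, capped at 600 s
        return min(seconds_to_generate * 60 * 10, 600)

    @spaces.GPU(duration=estimate_duration)
    def generate(video_path, seconds_to_generate):
        ...  # runs on GPU for at most the estimated duration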
@@ -601,11 +599,11 @@ def process(input_video, prompt, n_prompt, randomize_seed, seed, batch, resoluti
    # 20250508 pftq: automatically set distilled cfg to 1 if cfg is used
    if cfg > 1:
        gs = 1
-
+
    stream = AsyncStream()
 
    # 20250506 pftq: Pass num_clean_frames, vae_batch, etc
-    async_run(worker, input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch)
+    async_run(worker_video, input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch)
 
    output_filename = None
 
@@ -628,11 +626,6 @@ def process(input_video, prompt, n_prompt, randomize_seed, seed, batch, resoluti
 def end_process():
    stream.input_queue.push('end')
 
-quick_prompts = [
-    'The girl dances gracefully, with clear movements, full of charm.',
-    'A character doing some simple body movements.',
-]
-quick_prompts = [[x] for x in quick_prompts]
 
 css = make_progress_bar_css()
 block = gr.Blocks(css=css).queue()
@@ -641,23 +634,25 @@ with block:
    with gr.Row():
        gr.HTML("""
        <p style="background-color: red;"><big><big><big><b>⚠️To use FramePack, <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR?duplicate=true">duplicate this space</a> and set a GPU with 30 GB VRAM.</b>
-
+
        You can't use FramePack directly here because this space runs on a CPU, which is not enough for FramePack. Please provide <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR/discussions/new">feedback</a> if you have issues.
        </big></big></big></p>
        """)
    # 20250506 pftq: Updated title to reflect video input functionality
    gr.Markdown('# Framepack F1 with Image Input or with Video Input (Video Extension)')
+    gr.Markdown(f"""### Video diffusion, but feels like image diffusion
+    *FramePack F1 - a FramePack model that only predicts future frames from history frames*
+    ### *beta* FramePack Fill 🖋️ - draw a mask over the input image to inpaint the video output
+    adapted from the official code repo [FramePack](https://github.com/lllyasviel/FramePack) by [lllyasviel](lllyasviel/FramePack_F1_I2V_HY_20250503) and [FramePack Studio](https://github.com/colinurbs/FramePack-Studio) 🙌🏻
+    """)
    with gr.Row():
        with gr.Column():
-            # 20250506 pftq: Changed to Video input from Image
            input_video = gr.Video(sources='upload', label="Input Video", height=320)
            prompt = gr.Textbox(label="Prompt", value='')
-            #example_quick_prompts = gr.Dataset(samples=quick_prompts, label='Quick List', samples_per_page=1000, components=[prompt])
-            #example_quick_prompts.click(lambda x: x[0], inputs=[example_quick_prompts], outputs=prompt, show_progress=False, queue=False)
 
            with gr.Row():
-                start_button = gr.Button(value="Start Generation")
-                end_button = gr.Button(value="End Generation", interactive=False)
+                start_button = gr.Button(value="Start Generation", variant="primary")
+                end_button = gr.Button(value="End Generation", variant="stop", interactive=False)
 
            with gr.Accordion("Advanced settings", open=False):
                with gr.Row():
@@ -672,33 +667,33 @@ with block:
                resolution = gr.Number(label="Resolution (max width or height)", value=640, precision=0, visible=False)
 
                total_second_length = gr.Slider(label="Additional Video Length to Generate (Seconds)", minimum=1, maximum=120, value=5, step=0.1)
-
+
                # 20250506 pftq: Reduced default distilled guidance scale to improve adherence to input video
                gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=3.0, step=0.01, info='Prompt adherence at the cost of fewer details from the input video, but to a lesser extent than Context Frames.')
                cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=True, info='Use this instead of Distilled for more detail/control + Negative Prompt (make sure Distilled is set to 1). Doubles render time.')  # Should not change
                rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False)  # Should not change
 
-                n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=True, info='Requires using normal CFG (undistilled) instead of Distilled (set Distilled=1 and CFG > 1).')
+                n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=True, info='Requires using normal CFG (undistilled) instead of Distilled (set Distilled=1 and CFG > 1).')
                steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1, info='Increase for more quality, especially if using high non-distilled CFG.')
 
                # 20250506 pftq: Renamed slider to Number of Context Frames and updated description
                num_clean_frames = gr.Slider(label="Number of Context Frames", minimum=2, maximum=10, value=5, step=1, info="Retains more video details but increases memory use. Reduce to 2 if you hit memory issues.")
-
+
                default_vae = 32
                if high_vram:
                    default_vae = 128
                elif free_mem_gb >= 20:
                    default_vae = 64
-
+
                vae_batch = gr.Slider(label="VAE Batch Size for Input Video", minimum=4, maximum=256, value=default_vae, step=4, info="Reduce if running out of memory. Increase for better quality frames during fast motion.")
 
-                latent_window_size = gr.Slider(label="Latent Window Size", minimum=9, maximum=33, value=9, step=1, visible=True, info='Generate more frames at a time (larger chunks). Less degradation and better blending but higher VRAM cost.')
+                latent_window_size = gr.Slider(label="Latent Window Size", minimum=9, maximum=33, value=9, step=1, visible=True, info='Generate more frames at a time (larger chunks). Less degradation and better blending but higher VRAM cost.')
 
                gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. A larger value causes slower speed.")
 
                mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs.")
 
-            with gr.Row():
+            with gr.Accordion("Debug", open=False):
                input_video_debug = gr.Video(sources='upload', label="Input Video Debug", height=320)
                prompt_debug = gr.Textbox(label="Prompt Debug", value='')
                total_second_length_debug = gr.Slider(label="Additional Video Length to Generate (Seconds) Debug", minimum=1, maximum=120, value=1, step=0.1)
@@ -735,19 +730,19 @@ with block:
                ],
            ],
            run_on_click = True,
-            fn = process,
+            fn = process_video,
            inputs = [input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch],
            outputs = [result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
            cache_examples = True,
    )
 
-    gr.HTML("""
-    <div style="text-align:center; margin-top:20px;">Share your results and find ideas at the <a href="https://x.com/search?q=framepack&f=live" target="_blank">FramePack Twitter (X) thread</a></div>
-    """)
+    gr.Markdown('## Guide')
+    gr.Markdown("I discourage using the Text-to-Video feature: generate an image with Flux and use Image-to-Video instead; you will save time.")
+
 
    # 20250506 pftq: Updated inputs to include num_clean_frames
    ips = [input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch]
-    start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
+    start_button.click(fn=process_video, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
    end_button.click(fn=end_process)