Fabrice-TIERCELIN committed on
Commit
eb6ac03
·
verified ·
1 Parent(s): b138ec9

Finish merge

Browse files
Files changed (1) hide show
  1. app_v2v.py +6 -6
app_v2v.py CHANGED
@@ -50,8 +50,6 @@ if torch.cuda.device_count() > 0:
50
  print(f'Free VRAM {free_mem_gb} GB')
51
  print(f'High-VRAM Mode: {high_vram}')
52
 
53
-
54
-
55
  text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
56
  text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
57
  tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
@@ -926,7 +924,8 @@ adapted from the official code repo [FramePack](https://github.com/lllyasviel/Fr
926
  t2v = gr.Checkbox(label="Do text-to-video (ignored for video extension)", value=False)
927
 
928
  with gr.Row():
929
- start_button = gr.Button(value="Start Generation", variant="primary")
 
930
  end_button = gr.Button(value="End Generation", variant="stop", interactive=False)
931
 
932
  total_second_length = gr.Slider(label="Video Length to Generate (seconds)", minimum=1, maximum=120, value=2, step=0.1)
@@ -983,7 +982,8 @@ adapted from the official code repo [FramePack](https://github.com/lllyasviel/Fr
983
  # 20250506 pftq: Updated inputs to include num_clean_frames
984
  ips = [input_image, prompt, t2v, n_prompt, randomize_seed, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
985
  ips_video = [input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch]
986
- start_button.click(fn=process_video, inputs=ips_video, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
 
987
  end_button.click(fn=end_process)
988
 
989
  with gr.Row(elem_id="image_examples", visible=False):
@@ -1093,7 +1093,7 @@ adapted from the official code repo [FramePack](https://github.com/lllyasviel/Fr
1093
  run_on_click = True,
1094
  fn = process_video,
1095
  inputs = ips_video,
1096
- outputs = [result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
1097
  cache_examples = True,
1098
  )
1099
 
@@ -1114,7 +1114,7 @@ adapted from the official code repo [FramePack](https://github.com/lllyasviel/Fr
1114
  inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
1115
  outputs=[]
1116
  )
1117
-
1118
  input_video_debug.upload(
1119
  fn=handle_field_debug_change,
1120
  inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
 
50
  print(f'Free VRAM {free_mem_gb} GB')
51
  print(f'High-VRAM Mode: {high_vram}')
52
 
 
 
53
  text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
54
  text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
55
  tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
 
924
  t2v = gr.Checkbox(label="Do text-to-video (ignored for video extension)", value=False)
925
 
926
  with gr.Row():
927
+ start_button = gr.Button(value="Generate from image", variant="primary")
928
+ start_button_video = gr.Button(value="Generate from video", variant="primary")
929
  end_button = gr.Button(value="End Generation", variant="stop", interactive=False)
930
 
931
  total_second_length = gr.Slider(label="Video Length to Generate (seconds)", minimum=1, maximum=120, value=2, step=0.1)
 
982
  # 20250506 pftq: Updated inputs to include num_clean_frames
983
  ips = [input_image, prompt, t2v, n_prompt, randomize_seed, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
984
  ips_video = [input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch]
985
+ start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
986
+ start_button_video.click(fn=process_video, inputs=ips_video, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button_video, end_button])
987
  end_button.click(fn=end_process)
988
 
989
  with gr.Row(elem_id="image_examples", visible=False):
 
1093
  run_on_click = True,
1094
  fn = process_video,
1095
  inputs = ips_video,
1096
+ outputs = [result_video, preview_image, progress_desc, progress_bar, start_button_video, end_button],
1097
  cache_examples = True,
1098
  )
1099
 
 
1114
  inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
1115
  outputs=[]
1116
  )
1117
+
1118
  input_video_debug.upload(
1119
  fn=handle_field_debug_change,
1120
  inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],