Fabrice-TIERCELIN committed on
Commit
ba007f2
·
verified ·
1 Parent(s): fe2fc85
Files changed (1) hide show
  1. app.py +14 -13
app.py CHANGED
@@ -844,7 +844,7 @@ def process(input_image,
844
 
845
  if torch.cuda.device_count() == 0:
846
  gr.Warning('Set this space to GPU config to make it work.')
847
- yield gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
848
  return
849
 
850
  if randomize_seed:
@@ -858,7 +858,7 @@ def process(input_image,
858
  input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
859
  print("No input image provided. Using a blank white image.")
860
 
861
- yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
862
 
863
  stream = AsyncStream()
864
 
@@ -871,11 +871,11 @@ def process(input_image,
871
 
872
  if flag == 'file':
873
  output_filename = data
874
- yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
875
 
876
  if flag == 'progress':
877
  preview, desc, html = data
878
- yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
879
 
880
  if flag == 'end':
881
  end = time.time()
@@ -888,7 +888,7 @@ def process(input_image,
888
  ((str(hours) + " h, ") if hours != 0 else "") + \
889
  ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
890
  str(secondes) + " sec. " + \
891
- "You can upscale the result with RIFE. To make all your generated scenes consistent, you can then apply a face swap on the main character. If you do not see the generated video above, the process may have failed. See the logs for more information. If you see an error like ''NVML_SUCCESS == r INTERNAL ASSERT FAILED'', you probably haven't enough VRAM. Test an example or other options to compare. You can share your inputs to the original space or set your space in public for a peer review.", gr.update(interactive=True), gr.update(interactive=False)
892
  break
893
 
894
  def get_duration_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
@@ -911,7 +911,7 @@ def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, re
911
 
912
  if torch.cuda.device_count() == 0:
913
  gr.Warning('Set this space to GPU config to make it work.')
914
- yield gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
915
  return
916
 
917
  if randomize_seed:
@@ -922,7 +922,7 @@ def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, re
922
  # 20250506 pftq: Updated assertion for video input
923
  assert input_video is not None, 'No input video!'
924
 
925
- yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
926
 
927
  # 20250507 pftq: Even the H100 needs offloading if the video dimensions are 720p or higher
928
  if high_vram and (no_resize or resolution>640):
@@ -949,11 +949,11 @@ def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, re
949
 
950
  if flag == 'file':
951
  output_filename = data
952
- yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
953
 
954
  if flag == 'progress':
955
  preview, desc, html = data
956
- yield output_filename, gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True) # 20250506 pftq: Keep refreshing the video in case it got hidden when the tab was in the background
957
 
958
  if flag == 'end':
959
  end = time.time()
@@ -967,7 +967,7 @@ def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, re
967
  ((str(hours) + " h, ") if hours != 0 else "") + \
968
  ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
969
  str(secondes) + " sec. " + \
970
- " You can upscale the result with RIFE. To make all your generated scenes consistent, you can then apply a face swap on the main character. If you do not see the generated video above, the process may have failed. See the logs for more information. If you see an error like ''NVML_SUCCESS == r INTERNAL ASSERT FAILED'', you probably haven't enough VRAM. Test an example or other options to compare. You can share your inputs to the original space or set your space in public for a peer review.", '', gr.update(interactive=True), gr.update(interactive=False)
971
  break
972
 
973
  def end_process():
@@ -1116,6 +1116,7 @@ with block:
1116
  total_second_length_debug = gr.Slider(label="Additional Video Length to Generate (seconds) Debug", minimum=1, maximum=120, value=1, step=0.1)
1117
 
1118
  with gr.Column():
 
1119
  preview_image = gr.Image(label="Next Latents", height=200, visible=False)
1120
  result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
1121
  progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
@@ -1415,7 +1416,7 @@ with block:
1415
  raise gr.Error("Please provide an image to extend.")
1416
  if generation_mode == "video" and input_video is None:
1417
  raise gr.Error("Please provide a video to extend.")
1418
- return gr.update(interactive=True)
1419
 
1420
  def handle_generation_mode_change(generation_mode_data):
1421
  if generation_mode_data == "text":
@@ -1463,10 +1464,10 @@ with block:
1463
  timeless_prompt.change(fn=handle_timeless_prompt_change, inputs=[timeless_prompt], outputs=[final_prompt])
1464
  start_button.click(fn = check_parameters, inputs = [
1465
  generation_mode, input_image, input_video
1466
- ], outputs = [end_button], queue = False, show_progress = False).success(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
1467
  start_button_video.click(fn = check_parameters, inputs = [
1468
  generation_mode, input_image, input_video
1469
- ], outputs = [end_button], queue = False, show_progress = False).success(fn=process_video, inputs=ips_video, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button_video, end_button])
1470
  end_button.click(fn=end_process)
1471
 
1472
  generation_mode.change(fn = save_preferences, inputs = [
 
844
 
845
  if torch.cuda.device_count() == 0:
846
  gr.Warning('Set this space to GPU config to make it work.')
847
+ yield gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(visible = False)
848
  return
849
 
850
  if randomize_seed:
 
858
  input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
859
  print("No input image provided. Using a blank white image.")
860
 
861
+ yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True), gr.update()
862
 
863
  stream = AsyncStream()
864
 
 
871
 
872
  if flag == 'file':
873
  output_filename = data
874
+ yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True), gr.update()
875
 
876
  if flag == 'progress':
877
  preview, desc, html = data
878
+ yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True), gr.update()
879
 
880
  if flag == 'end':
881
  end = time.time()
 
888
  ((str(hours) + " h, ") if hours != 0 else "") + \
889
  ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
890
  str(secondes) + " sec. " + \
891
+ "You can upscale the result with RIFE. To make all your generated scenes consistent, you can then apply a face swap on the main character. If you do not see the generated video above, the process may have failed. See the logs for more information. If you see an error like ''NVML_SUCCESS == r INTERNAL ASSERT FAILED'', you probably haven't enough VRAM. Test an example or other options to compare. You can share your inputs to the original space or set your space in public for a peer review.", gr.update(interactive=True), gr.update(interactive=False), gr.update(visible = False)
892
  break
893
 
894
  def get_duration_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
 
911
 
912
  if torch.cuda.device_count() == 0:
913
  gr.Warning('Set this space to GPU config to make it work.')
914
+ yield gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(visible = False)
915
  return
916
 
917
  if randomize_seed:
 
922
  # 20250506 pftq: Updated assertion for video input
923
  assert input_video is not None, 'No input video!'
924
 
925
+ yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True), gr.update()
926
 
927
  # 20250507 pftq: Even the H100 needs offloading if the video dimensions are 720p or higher
928
  if high_vram and (no_resize or resolution>640):
 
949
 
950
  if flag == 'file':
951
  output_filename = data
952
+ yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True), gr.update()
953
 
954
  if flag == 'progress':
955
  preview, desc, html = data
956
+ yield output_filename, gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True), gr.update() # 20250506 pftq: Keep refreshing the video in case it got hidden when the tab was in the background
957
 
958
  if flag == 'end':
959
  end = time.time()
 
967
  ((str(hours) + " h, ") if hours != 0 else "") + \
968
  ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
969
  str(secondes) + " sec. " + \
970
+ " You can upscale the result with RIFE. To make all your generated scenes consistent, you can then apply a face swap on the main character. If you do not see the generated video above, the process may have failed. See the logs for more information. If you see an error like ''NVML_SUCCESS == r INTERNAL ASSERT FAILED'', you probably haven't enough VRAM. Test an example or other options to compare. You can share your inputs to the original space or set your space in public for a peer review.", '', gr.update(interactive=True), gr.update(interactive=False), gr.update(visible = False)
971
  break
972
 
973
  def end_process():
 
1116
  total_second_length_debug = gr.Slider(label="Additional Video Length to Generate (seconds) Debug", minimum=1, maximum=120, value=1, step=0.1)
1117
 
1118
  with gr.Column():
1119
+ warning = gr.HTML(value = "<center><big>Your computer must <u>not</u> enter into standby mode.</big><br/>On Chrome, you can force to keep a tab alive in <code>chrome://discards/</code></center>", visible = False)
1120
  preview_image = gr.Image(label="Next Latents", height=200, visible=False)
1121
  result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
1122
  progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
 
1416
  raise gr.Error("Please provide an image to extend.")
1417
  if generation_mode == "video" and input_video is None:
1418
  raise gr.Error("Please provide a video to extend.")
1419
+ return [gr.update(interactive=True), gr.update(visible = True)]
1420
 
1421
  def handle_generation_mode_change(generation_mode_data):
1422
  if generation_mode_data == "text":
 
1464
  timeless_prompt.change(fn=handle_timeless_prompt_change, inputs=[timeless_prompt], outputs=[final_prompt])
1465
  start_button.click(fn = check_parameters, inputs = [
1466
  generation_mode, input_image, input_video
1467
+ ], outputs = [end_button, warning], queue = False, show_progress = False).success(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button, warning])
1468
  start_button_video.click(fn = check_parameters, inputs = [
1469
  generation_mode, input_image, input_video
1470
+ ], outputs = [end_button, warning], queue = False, show_progress = False).success(fn=process_video, inputs=ips_video, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button_video, end_button, warning])
1471
  end_button.click(fn=end_process)
1472
 
1473
  generation_mode.change(fn = save_preferences, inputs = [