Fabrice-TIERCELIN committed on
Commit
fdc65b5
·
verified ·
1 Parent(s): edf9a0d

block = gr.Blocks(css=css, js=js).queue()

Browse files
Files changed (1) hide show
  1. app.py +10 -51
app.py CHANGED
@@ -946,51 +946,6 @@ def refresh_prompt():
946
  print(str(array))
947
  return ";".join(array)
948
 
949
- import time
950
- @spaces.GPU(duration=get_duration_video)
951
- def process_yield(input_image, final_prompt, generation_mode, n_prompt, randomize_seed, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
952
- global stream, high_vram, input_video_debug_value, prompt_debug_value, total_second_length_debug_value
953
-
954
- if torch.cuda.device_count() == 0:
955
- gr.Warning('Set this space to GPU config to make it work.')
956
- yield None, None, None, None, None, None
957
- return
958
-
959
- if input_video_debug_value is not None or prompt_debug_value is not None or total_second_length_debug_value is not None:
960
- input_video = input_video_debug_value
961
- prompt = prompt_debug_value
962
- total_second_length = total_second_length_debug_value
963
- input_video_debug_value = prompt_debug_value = total_second_length_debug_value = None
964
-
965
- if randomize_seed:
966
- seed = random.randint(0, np.iinfo(np.int32).max)
967
-
968
- prompts = prompt.split(";")
969
-
970
- # 20250506 pftq: Updated assertion for video input
971
- assert input_video is not None, 'No input video!'
972
-
973
- yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
974
-
975
- # 20250507 pftq: Even the H100 needs offloading if the video dimensions are 720p or higher
976
- if high_vram and (no_resize or resolution>640):
977
- print("Disabling high vram mode due to no resize and/or potentially higher resolution...")
978
- high_vram = False
979
- vae.enable_slicing()
980
- vae.enable_tiling()
981
- DynamicSwapInstaller.install_model(transformer, device=gpu)
982
- DynamicSwapInstaller.install_model(text_encoder, device=gpu)
983
-
984
- # 20250508 pftq: automatically set distilled cfg to 1 if cfg is used
985
- if cfg > 1:
986
- gs = 1
987
-
988
- for i in range(3):
989
- time.sleep(1)
990
- yield (gr.update(), gr.update(), "In progress..." + str(i), gr.update(), gr.update(), gr.update(interactive=True))
991
- yield (gr.update(), gr.update(), "Finished", gr.update(), gr.update(), gr.update(interactive=False))
992
- return (gr.update(), gr.update(), "Finished", gr.update(), gr.update(), gr.update(interactive=False))
993
-
994
  title_html = """
995
  <h1><center>FramePack</center></h1>
996
  <big><center>Generate videos from text/image/video freely, without account, without watermark and download it</center></big>
@@ -1000,7 +955,15 @@ title_html = """
1000
  """
1001
 
1002
  css = make_progress_bar_css()
1003
- block = gr.Blocks(css=css).queue()
 
 
 
 
 
 
 
 
1004
  with block:
1005
  if torch.cuda.device_count() == 0:
1006
  with gr.Row():
@@ -1093,8 +1056,6 @@ with block:
1093
  result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
1094
  progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
1095
  progress_bar = gr.HTML('', elem_classes='no-generating-animation')
1096
-
1097
- run_button = gr.Button("Run", variant="primary")
1098
 
1099
  # 20250506 pftq: Updated inputs to include num_clean_frames
1100
  ips = [input_image, final_prompt, generation_mode, n_prompt, randomize_seed, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
@@ -1120,8 +1081,6 @@ with block:
1120
  raise gr.Error("Please provide a video to extend.")
1121
  return [gr.update(interactive=True)]
1122
 
1123
- run_button.click(fn=process_yield, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
1124
-
1125
  prompt_number.change(fn=handle_prompt_number_change, inputs=[], outputs=[])
1126
  timeless_prompt.change(fn=handle_timeless_prompt_change, inputs=[timeless_prompt], outputs=[final_prompt])
1127
  start_button.click(fn = check_parameters, inputs = [
@@ -1233,7 +1192,7 @@ with block:
1233
  ],
1234
  ],
1235
  run_on_click = True,
1236
- fn = process_yield,
1237
  inputs = ips_video,
1238
  outputs = [result_video, preview_image, progress_desc, progress_bar, start_button_video, end_button],
1239
  cache_examples = torch.cuda.device_count() > 0,
 
946
  print(str(array))
947
  return ";".join(array)
948
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
949
  title_html = """
950
  <h1><center>FramePack</center></h1>
951
  <big><center>Generate videos from text/image/video freely, without account, without watermark and download it</center></big>
 
955
  """
956
 
957
  css = make_progress_bar_css()
958
+ js = """
959
+ window.addEventListener("beforeunload", function (e) {
960
+ var confirmationMessage = 'It looks like you have been editing something. If you leave before saving, your changes will be lost.';
961
+
962
+ (e || window.event).returnValue = confirmationMessage;
963
+ return confirmationMessage;
964
+ });
965
+ """
966
+ block = gr.Blocks(css=css, js=js).queue()
967
  with block:
968
  if torch.cuda.device_count() == 0:
969
  with gr.Row():
 
1056
  result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
1057
  progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
1058
  progress_bar = gr.HTML('', elem_classes='no-generating-animation')
 
 
1059
 
1060
  # 20250506 pftq: Updated inputs to include num_clean_frames
1061
  ips = [input_image, final_prompt, generation_mode, n_prompt, randomize_seed, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
 
1081
  raise gr.Error("Please provide a video to extend.")
1082
  return [gr.update(interactive=True)]
1083
 
 
 
1084
  prompt_number.change(fn=handle_prompt_number_change, inputs=[], outputs=[])
1085
  timeless_prompt.change(fn=handle_timeless_prompt_change, inputs=[timeless_prompt], outputs=[final_prompt])
1086
  start_button.click(fn = check_parameters, inputs = [
 
1192
  ],
1193
  ],
1194
  run_on_click = True,
1195
+ fn = process_video,
1196
  inputs = ips_video,
1197
  outputs = [result_video, preview_image, progress_desc, progress_bar, start_button_video, end_button],
1198
  cache_examples = torch.cuda.device_count() > 0,