Do not cache the examples
Browse files
app.py
CHANGED
|
@@ -948,7 +948,22 @@ def handle_prompt_number_change():
|
|
| 948 |
|
| 949 |
def handle_timed_prompt_change(timed_prompt_id, timed_prompt):
|
| 950 |
timed_prompts[timed_prompt_id] = timed_prompt
|
| 951 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 952 |
|
| 953 |
css = make_progress_bar_css()
|
| 954 |
block = gr.Blocks(css=css).queue()
|
|
@@ -956,18 +971,12 @@ with block:
|
|
| 956 |
if torch.cuda.device_count() == 0:
|
| 957 |
with gr.Row():
|
| 958 |
gr.HTML("""
|
| 959 |
-
<p style="background-color: red;"><big><big><big><b>⚠️To use FramePack, <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/FramePack?duplicate=true">duplicate this space</a> and set a GPU with 30 GB VRAM.</b>
|
| 960 |
|
| 961 |
-
You can't use FramePack directly here because this space runs on a CPU, which is not enough for FramePack. Please provide <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/FramePack/discussions/new">feedback</a> if you have issues.
|
| 962 |
</big></big></big></p>
|
| 963 |
""")
|
| 964 |
-
|
| 965 |
-
gr.Markdown('# Framepack F1 with Image Input or with Video Input (Video Extension)')
|
| 966 |
-
gr.Markdown(f"""### Video diffusion, but feels like image diffusion
|
| 967 |
-
*FramePack F1 - a FramePack model that only predicts future frames from history frames*
|
| 968 |
-
### *beta* FramePack Fill 🖋️- draw a mask over the input image to inpaint the video output
|
| 969 |
-
adapted from the official code repo [FramePack](https://github.com/lllyasviel/FramePack) by [lllyasviel](lllyasviel/FramePack_F1_I2V_HY_20250503) and [FramePack Studio](https://github.com/colinurbs/FramePack-Studio) 🙌🏻
|
| 970 |
-
""")
|
| 971 |
with gr.Row():
|
| 972 |
with gr.Column():
|
| 973 |
generation_mode = gr.Radio([["Text-to-Video", "text"], ["Image-to-Video", "image"], ["Video-to-Video", "video"]], label="Generation mode", value = "image")
|
|
@@ -1131,7 +1140,7 @@ adapted from the official code repo [FramePack](https://github.com/lllyasviel/Fr
|
|
| 1131 |
fn = process,
|
| 1132 |
inputs = ips,
|
| 1133 |
outputs = [result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
|
| 1134 |
-
cache_examples =
|
| 1135 |
)
|
| 1136 |
|
| 1137 |
with gr.Row(elem_id="video_examples", visible=False):
|
|
@@ -1163,7 +1172,7 @@ adapted from the official code repo [FramePack](https://github.com/lllyasviel/Fr
|
|
| 1163 |
fn = process_video,
|
| 1164 |
inputs = ips_video,
|
| 1165 |
outputs = [result_video, preview_image, progress_desc, progress_bar, start_button_video, end_button],
|
| 1166 |
-
cache_examples =
|
| 1167 |
)
|
| 1168 |
|
| 1169 |
|
|
|
|
| 948 |
|
| 949 |
def handle_timed_prompt_change(timed_prompt_id, timed_prompt):
    """Record an edited timed prompt and rebuild the combined prompt string.

    Args:
        timed_prompt_id: Sortable key giving this prompt's position in the
            sequence (used as the dict key in the module-level ``timed_prompts``).
        timed_prompt: The new prompt text for that position.

    Returns:
        str: All stored prompts ordered by id, joined with ``";"``.
    """
    timed_prompts[timed_prompt_id] = timed_prompt
    # Dict keys are unique, so sorting the (id, prompt) pairs directly is
    # equivalent to sorting on the id alone; no intermediate copy is needed.
    ordered_prompts = [prompt for _, prompt in sorted(timed_prompts.items())]
    # Debug trace kept for parity with the original behavior.
    print(ordered_prompts)
    return ";".join(ordered_prompts)
|
| 958 |
+
|
| 959 |
+
title_html = """
|
| 960 |
+
<h1><center>FramePack</center></h1>
|
| 961 |
+
<big><center>Generate videos from text/image/video freely, without account, without watermark and download it</center></big>
|
| 962 |
+
<br/>
|
| 963 |
+
<br/>
|
| 964 |
+
|
| 965 |
+
<p>This space is ready to work on ZeroGPU and GPU and has been tested successfully on ZeroGPU. Please leave a <a href="discussions/new">message in discussion</a> if you encounter issues.</p>
|
| 966 |
+
"""
|
| 967 |
|
| 968 |
css = make_progress_bar_css()
|
| 969 |
block = gr.Blocks(css=css).queue()
|
|
|
|
| 971 |
if torch.cuda.device_count() == 0:
|
| 972 |
with gr.Row():
|
| 973 |
gr.HTML("""
|
| 974 |
+
<p style="background-color: red;"><big><big><big><b>⚠️To use FramePack, <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/FramePack?duplicate=true">duplicate this space</a> and set a GPU with 30 GB VRAM.</b>
|
| 975 |
|
| 976 |
+
You can't use FramePack directly here because this space runs on a CPU, which is not enough for FramePack. Please provide <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/FramePack/discussions/new">feedback</a> if you have issues.
|
| 977 |
</big></big></big></p>
|
| 978 |
""")
|
| 979 |
+
gr.HTML(title_html)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 980 |
with gr.Row():
|
| 981 |
with gr.Column():
|
| 982 |
generation_mode = gr.Radio([["Text-to-Video", "text"], ["Image-to-Video", "image"], ["Video-to-Video", "video"]], label="Generation mode", value = "image")
|
|
|
|
| 1140 |
fn = process,
|
| 1141 |
inputs = ips,
|
| 1142 |
outputs = [result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
|
| 1143 |
+
cache_examples = torch.cuda.device_count() > 0,
|
| 1144 |
)
|
| 1145 |
|
| 1146 |
with gr.Row(elem_id="video_examples", visible=False):
|
|
|
|
| 1172 |
fn = process_video,
|
| 1173 |
inputs = ips_video,
|
| 1174 |
outputs = [result_video, preview_image, progress_desc, progress_bar, start_button_video, end_button],
|
| 1175 |
+
cache_examples = torch.cuda.device_count() > 0,
|
| 1176 |
)
|
| 1177 |
|
| 1178 |
|