Fabrice-TIERCELIN committed · commit fbe21e2 · verified · 1 parent: ed88a11

Files changed (1): app.py (+2 -6)
--- a/app.py
+++ b/app.py
@@ -1649,14 +1649,13 @@ with block:
     ips = [input_image, end_image, image_position, end_stillness, final_prompt, generation_mode, n_prompt, randomize_seed, seed, auto_allocation, allocation_time, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf, fps_number]
     ips_video = [input_video, end_image, end_stillness, final_prompt, n_prompt, randomize_seed, seed, auto_allocation, allocation_time, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch]
 
-    with gr.Accordion("Debug", open=False):
+    with gr.Row(elem_id="cache", visible=False):
         input_image_debug = gr.Image(type="numpy", label="Image Debug", height=320)
         input_video_debug = gr.Video(sources='upload', label="Input Video Debug", height=320)
         end_image_debug = gr.Image(type="numpy", label="End Image Debug", height=320)
         prompt_debug = gr.Textbox(label="Prompt Debug", value='')
-        total_second_length_debug = gr.Slider(label="Additional Video Length to Generate (seconds) Debug", minimum=1, maximum=120, value=1, step=0.1)
+        total_second_length_debug = gr.Slider(label="Additional Video Length to Generate (seconds) Debug", minimum=1, maximum=120, value=6, step=0.1)
 
-    with gr.Row(elem_id="text_examples", visible=False):
         gr.Examples(
             label = "Examples from text",
             examples = [
@@ -1694,7 +1693,6 @@ with block:
             cache_examples = torch.cuda.device_count() > 0,
         )
 
-    with gr.Row(elem_id="image_examples", visible=False):
         gr.Examples(
             label = "Examples from image",
             examples = [
@@ -1836,7 +1834,6 @@ with block:
             cache_examples = torch.cuda.device_count() > 0,
         )
 
-    with gr.Row(elem_id="start_end_examples", visible=False):
         gr.Examples(
             label = "Examples from start and end frames",
             examples = [
@@ -1874,7 +1871,6 @@ with block:
             cache_examples = torch.cuda.device_count() > 0,
         )
 
-    with gr.Row(elem_id="video_examples", visible=False):
         gr.Examples(
             label = "Examples from video",
             examples = [
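
Note on the change: the old "Debug" accordion was collapsed but still visible to users, while the new gr.Row(elem_id="cache", visible=False) hides the debug inputs entirely yet keeps them wired into the app; the hidden rows around the gr.Examples blocks are dropped so the examples render directly. The following is a minimal standalone Gradio sketch of the difference between the two containers (illustrative only, not taken from app.py):

import gradio as gr

# Standalone sketch contrasting the two containers swapped by this commit.
# gr.Accordion(open=False) renders a collapsed panel the user can expand;
# gr.Row(visible=False) keeps its children out of the UI entirely while
# they stay in the component graph, so event handlers can still use them.
with gr.Blocks() as demo:
    with gr.Accordion("Debug", open=False):       # old container: expandable
        gr.Textbox(label="Shown once the accordion is expanded")
    with gr.Row(elem_id="cache", visible=False):  # new container: never rendered
        gr.Textbox(label="Hidden, but still usable from callbacks")

if __name__ == "__main__":
    demo.launch()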