Fabrice-TIERCELIN committed on
Commit
ac697e1
·
verified ·
1 Parent(s): e9848b2

Add more code

Browse files
Files changed (1) hide show
  1. app.py +82 -15
app.py CHANGED
@@ -930,12 +930,13 @@ def process(input_image,
930
  if auto_allocation:
931
  allocation_time = min(total_second_length * 60 * (1.5 if use_teacache else 3.0) * (1 + ((steps - 25) / 25))**2, 600)
932
 
933
- if input_image_debug_value[0] is not None or prompt_debug_value[0] is not None or total_second_length_debug_value[0] is not None:
934
  input_image = input_image_debug_value[0]
 
935
  prompt = prompt_debug_value[0]
936
  total_second_length = total_second_length_debug_value[0]
937
  allocation_time = min(total_second_length_debug_value[0] * 60 * 100, 600)
938
- input_image_debug_value[0] = prompt_debug_value[0] = total_second_length_debug_value[0] = None
939
 
940
  if torch.cuda.device_count() == 0:
941
  gr.Warning('Set this space to GPU config to make it work.')
@@ -1127,7 +1128,7 @@ with block:
1127
  local_storage = gr.BrowserState(default_local_storage)
1128
  with gr.Row():
1129
  with gr.Column():
1130
- generation_mode = gr.Radio([["Text-to-Video", "text"], ["Image-to-Video", "image"], ["Video Extension", "video"]], elem_id="generation-mode", label="Generation mode", value = "image")
1131
  text_to_video_hint = gr.HTML("Text-to-Video badly works with a flash effect at the start. I discourage to use the Text-to-Video feature. You should rather generate an image with Flux and use Image-to-Video. You will save time.")
1132
  input_image = gr.Image(sources='upload', type="numpy", label="Image", height=320)
1133
  image_position = gr.Slider(label="Image position", minimum=0, maximum=100, value=0, step=1, info='0=Video start; 100=Video end (lower quality)')
@@ -1666,48 +1667,114 @@ with block:
1666
  def check_parameters(generation_mode, input_image, input_video):
1667
  if generation_mode == "image" and input_image is None:
1668
  raise gr.Error("Please provide an image to extend.")
 
 
1669
  if generation_mode == "video" and input_video is None:
1670
  raise gr.Error("Please provide a video to extend.")
1671
  return [gr.update(interactive=True), gr.update(visible = True)]
1672
 
1673
  def handle_generation_mode_change(generation_mode_data):
1674
  if generation_mode_data == "text":
1675
- return [gr.update(visible = True), gr.update(visible = False), gr.update(visible = False), gr.update(visible = False), gr.update(visible = True), gr.update(visible = False), gr.update(visible = False), gr.update(visible = False), gr.update(visible = False), gr.update(visible = False), gr.update(visible = False), gr.update(visible = True)]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1676
  elif generation_mode_data == "image":
1677
- return [gr.update(visible = False), gr.update(visible = True), gr.update(visible = True), gr.update(visible = False), gr.update(visible = True), gr.update(visible = False), gr.update(visible = False), gr.update(visible = False), gr.update(visible = False), gr.update(visible = False), gr.update(visible = False), gr.update(visible = True)]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1678
  elif generation_mode_data == "video":
1679
- return [gr.update(visible = False), gr.update(visible = False), gr.update(visible = False), gr.update(visible = True), gr.update(visible = False), gr.update(visible = True), gr.update(visible = True), gr.update(visible = True), gr.update(visible = True), gr.update(visible = True), gr.update(visible = True), gr.update(visible = False)]
1680
-
1681
-
1682
- def handle_field_debug_change(input_image_debug_data, input_video_debug_data, prompt_debug_data, total_second_length_debug_data):
 
 
 
 
 
 
 
 
 
 
 
 
 
1683
  print("handle_field_debug_change")
1684
  input_image_debug_value[0] = input_image_debug_data
1685
  input_video_debug_value[0] = input_video_debug_data
 
1686
  prompt_debug_value[0] = prompt_debug_data
1687
  total_second_length_debug_value[0] = total_second_length_debug_data
1688
  return []
1689
 
1690
  input_image_debug.upload(
1691
  fn=handle_field_debug_change,
1692
- inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
1693
  outputs=[]
1694
  )
1695
 
1696
  input_video_debug.upload(
1697
  fn=handle_field_debug_change,
1698
- inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
 
 
 
 
 
 
1699
  outputs=[]
1700
  )
1701
 
1702
  prompt_debug.change(
1703
  fn=handle_field_debug_change,
1704
- inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
1705
  outputs=[]
1706
  )
1707
 
1708
  total_second_length_debug.change(
1709
  fn=handle_field_debug_change,
1710
- inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
1711
  outputs=[]
1712
  )
1713
 
@@ -1731,7 +1798,7 @@ with block:
1731
  generation_mode.change(
1732
  fn=handle_generation_mode_change,
1733
  inputs=[generation_mode],
1734
- outputs=[text_to_video_hint, image_position, input_image, input_video, start_button, start_button_video, no_resize, batch, num_clean_frames, vae_batch, prompt_hint, fps_number]
1735
  )
1736
 
1737
  # Update display when the page loads
@@ -1739,7 +1806,7 @@ with block:
1739
  fn=handle_generation_mode_change, inputs = [
1740
  generation_mode
1741
  ], outputs = [
1742
- text_to_video_hint, image_position, input_image, input_video, start_button, start_button_video, no_resize, batch, num_clean_frames, vae_batch, prompt_hint, fps_number
1743
  ]
1744
  )
1745
 
 
930
  if auto_allocation:
931
  allocation_time = min(total_second_length * 60 * (1.5 if use_teacache else 3.0) * (1 + ((steps - 25) / 25))**2, 600)
932
 
933
+ if input_image_debug_value[0] is not None or end_image_debug_value[0] is not None or prompt_debug_value[0] is not None or total_second_length_debug_value[0] is not None:
934
  input_image = input_image_debug_value[0]
935
+ end_image = end_image_debug_value[0]
936
  prompt = prompt_debug_value[0]
937
  total_second_length = total_second_length_debug_value[0]
938
  allocation_time = min(total_second_length_debug_value[0] * 60 * 100, 600)
939
+ input_image_debug_value[0] = end_image_debug_value[0] = input_video_debug_value[0] = prompt_debug_value[0] = total_second_length_debug_value[0] = None
940
 
941
  if torch.cuda.device_count() == 0:
942
  gr.Warning('Set this space to GPU config to make it work.')
 
1128
  local_storage = gr.BrowserState(default_local_storage)
1129
  with gr.Row():
1130
  with gr.Column():
1131
+ generation_mode = gr.Radio([["Text-to-Video", "text"], ["Image-to-Video", "image"], ["Start & end frames", "start_end"], ["Video Extension", "video"]], elem_id="generation-mode", label="Generation mode", value = "image")
1132
  text_to_video_hint = gr.HTML("Text-to-Video badly works with a flash effect at the start. I discourage to use the Text-to-Video feature. You should rather generate an image with Flux and use Image-to-Video. You will save time.")
1133
  input_image = gr.Image(sources='upload', type="numpy", label="Image", height=320)
1134
  image_position = gr.Slider(label="Image position", minimum=0, maximum=100, value=0, step=1, info='0=Video start; 100=Video end (lower quality)')
 
1667
  def check_parameters(generation_mode, input_image, input_video):
1668
  if generation_mode == "image" and input_image is None:
1669
  raise gr.Error("Please provide an image to extend.")
1670
+ if generation_mode == "start_end" and input_image is None:
1671
+ raise gr.Error("Please provide an image to extend.")
1672
  if generation_mode == "video" and input_video is None:
1673
  raise gr.Error("Please provide a video to extend.")
1674
  return [gr.update(interactive=True), gr.update(visible = True)]
1675
 
1676
  def handle_generation_mode_change(generation_mode_data):
1677
  if generation_mode_data == "text":
1678
+ return [
1679
+ gr.update(visible = True), # text_to_video_hint
1680
+ gr.update(visible = False), # image_position
1681
+ gr.update(visible = False), # input_image
1682
+ gr.update(visible = False), # end_image
1683
+ gr.update(visible = False), # input_video
1684
+ gr.update(visible = True), # start_button
1685
+ gr.update(visible = False), # start_button_video
1686
+ gr.update(visible = False), # no_resize
1687
+ gr.update(visible = False), # batch
1688
+ gr.update(visible = False), # num_clean_frames
1689
+ gr.update(visible = False), # vae_batch
1690
+ gr.update(visible = False), # prompt_hint
1691
+ gr.update(visible = True) # fps_number
1692
+ ]
1693
  elif generation_mode_data == "image":
1694
+ return [
1695
+ gr.update(visible = False), # text_to_video_hint
1696
+ gr.update(visible = True), # image_position
1697
+ gr.update(visible = True), # input_image
1698
+ gr.update(visible = False), # end_image
1699
+ gr.update(visible = False), # input_video
1700
+ gr.update(visible = True), # start_button
1701
+ gr.update(visible = False), # start_button_video
1702
+ gr.update(visible = False), # no_resize
1703
+ gr.update(visible = False), # batch
1704
+ gr.update(visible = False), # num_clean_frames
1705
+ gr.update(visible = False), # vae_batch
1706
+ gr.update(visible = False), # prompt_hint
1707
+ gr.update(visible = True) # fps_number
1708
+ ]
1709
+ elif generation_mode_data == "start_end":
1710
+ return [
1711
+ gr.update(visible = False), # text_to_video_hint
1712
+ gr.update(visible = False), # image_position
1713
+ gr.update(visible = True), # input_image
1714
+ gr.update(visible = True), # end_image
1715
+ gr.update(visible = False), # input_video
1716
+ gr.update(visible = True), # start_button
1717
+ gr.update(visible = False), # start_button_video
1718
+ gr.update(visible = False), # no_resize
1719
+ gr.update(visible = False), # batch
1720
+ gr.update(visible = False), # num_clean_frames
1721
+ gr.update(visible = False), # vae_batch
1722
+ gr.update(visible = False), # prompt_hint
1723
+ gr.update(visible = True) # fps_number
1724
+ ]
1725
  elif generation_mode_data == "video":
1726
+ return [
1727
+ gr.update(visible = False), # text_to_video_hint
1728
+ gr.update(visible = False), # image_position
1729
+ gr.update(visible = False), # input_image
1730
+ gr.update(visible = False), # end_image
1731
+ gr.update(visible = True), # input_video
1732
+ gr.update(visible = False), # start_button
1733
+ gr.update(visible = True), # start_button_video
1734
+ gr.update(visible = True), # no_resize
1735
+ gr.update(visible = True), # batch
1736
+ gr.update(visible = True), # num_clean_frames
1737
+ gr.update(visible = True), # vae_batch
1738
+ gr.update(visible = True), # prompt_hint
1739
+ gr.update(visible = False) # fps_number
1740
+ ]
1741
+
1742
+ def handle_field_debug_change(input_image_debug_data, input_video_debug_data, end_image_debug_data, prompt_debug_data, total_second_length_debug_data):
1743
  print("handle_field_debug_change")
1744
  input_image_debug_value[0] = input_image_debug_data
1745
  input_video_debug_value[0] = input_video_debug_data
1746
+ end_image_debug_value[0] = end_image_debug_data
1747
  prompt_debug_value[0] = prompt_debug_data
1748
  total_second_length_debug_value[0] = total_second_length_debug_data
1749
  return []
1750
 
1751
  input_image_debug.upload(
1752
  fn=handle_field_debug_change,
1753
+ inputs=[input_image_debug, input_video_debug, end_image_debug, prompt_debug, total_second_length_debug],
1754
  outputs=[]
1755
  )
1756
 
1757
  input_video_debug.upload(
1758
  fn=handle_field_debug_change,
1759
+ inputs=[input_image_debug, input_video_debug, end_image_debug, prompt_debug, total_second_length_debug],
1760
+ outputs=[]
1761
+ )
1762
+
1763
+ end_image_debug.upload(
1764
+ fn=handle_field_debug_change,
1765
+ inputs=[input_image_debug, input_video_debug, end_image_debug, prompt_debug, total_second_length_debug],
1766
  outputs=[]
1767
  )
1768
 
1769
  prompt_debug.change(
1770
  fn=handle_field_debug_change,
1771
+ inputs=[input_image_debug, input_video_debug, end_image_debug, prompt_debug, total_second_length_debug],
1772
  outputs=[]
1773
  )
1774
 
1775
  total_second_length_debug.change(
1776
  fn=handle_field_debug_change,
1777
+ inputs=[input_image_debug, input_video_debug, end_image_debug, prompt_debug, total_second_length_debug],
1778
  outputs=[]
1779
  )
1780
 
 
1798
  generation_mode.change(
1799
  fn=handle_generation_mode_change,
1800
  inputs=[generation_mode],
1801
+ outputs=[text_to_video_hint, image_position, input_image, end_image, input_video, start_button, start_button_video, no_resize, batch, num_clean_frames, vae_batch, prompt_hint, fps_number]
1802
  )
1803
 
1804
  # Update display when the page loads
 
1806
  fn=handle_generation_mode_change, inputs = [
1807
  generation_mode
1808
  ], outputs = [
1809
+ text_to_video_hint, image_position, input_image, end_image, input_video, start_button, start_button_video, no_resize, batch, num_clean_frames, vae_batch, prompt_hint, fps_number
1810
  ]
1811
  )
1812