Fabrice-TIERCELIN committed on
Commit
7d3e529
·
verified ·
1 Parent(s): faa16c5

Like prod

Browse files
Files changed (1) hide show
  1. app.py +5 -68
app.py CHANGED
@@ -108,8 +108,6 @@ stream = AsyncStream()
108
  outputs_folder = './outputs/'
109
  os.makedirs(outputs_folder, exist_ok=True)
110
 
111
- input_image_debug_value = input_video_debug_value = prompt_debug_value = total_second_length_debug_value = None
112
-
113
  default_local_storage = {
114
  "generation-mode": "image",
115
  }
@@ -518,10 +516,6 @@ def worker(input_image, prompts, n_prompt, seed, resolution, total_second_length
518
  return
519
 
520
  def get_duration(input_image, prompt, generation_mode, n_prompt, randomize_seed, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf):
521
- global total_second_length_debug_value
522
-
523
- if total_second_length_debug_value is not None:
524
- return min(total_second_length_debug_value * 60 * 10, 600)
525
  return total_second_length * 60 * (0.7 if use_teacache else 1.3) * (2**((resolution - 640) / 640)) * (1 + ((steps - 25) / 100))
526
 
527
 
@@ -543,19 +537,13 @@ def process(input_image, prompt,
543
  use_teacache=False,
544
  mp4_crf=16
545
  ):
546
- global stream, input_image_debug_value, prompt_debug_value, total_second_length_debug_value
547
 
548
  if torch.cuda.device_count() == 0:
549
  gr.Warning('Set this space to GPU config to make it work.')
550
  yield gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
551
  return
552
 
553
- if input_image_debug_value is not None or prompt_debug_value is not None or total_second_length_debug_value is not None:
554
- input_image = input_image_debug_value
555
- prompt = prompt_debug_value
556
- total_second_length = total_second_length_debug_value
557
- input_image_debug_value = prompt_debug_value = total_second_length_debug_value = None
558
-
559
  if randomize_seed:
560
  seed = random.randint(0, np.iinfo(np.int32).max)
561
 
@@ -694,10 +682,6 @@ def worker_video(input_video, prompts, n_prompt, seed, batch, resolution, total_
694
  history_pixels = None
695
  previous_video = None
696
 
697
- # 20250507 pftq: hot fix for initial video being corrupted by vae encoding, issue with ghosting because of slight differences
698
- #history_pixels = input_video_pixels
699
- #save_bcthw_as_mp4(vae_decode(video_latents, vae).cpu(), os.path.join(outputs_folder, f'{job_id}_input_video.mp4'), fps=fps, crf=mp4_crf) # 20250507 pftq: test fast movement corrupted by vae encoding if vae batch size too low
700
-
701
  for section_index in range(total_latent_sections):
702
  if stream.input_queue.top() == 'end':
703
  stream.output_queue.push(('end', None))
@@ -860,27 +844,18 @@ def worker_video(input_video, prompts, n_prompt, seed, batch, resolution, total_
860
  return
861
 
862
  def get_duration_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
863
- global total_second_length_debug_value
864
- if total_second_length_debug_value is not None:
865
- return min(total_second_length_debug_value * 60 * 10, 600)
866
  return total_second_length * 60 * (0.7 if use_teacache else 2) * (2**((resolution - 640) / 640)) * (1 + ((steps - 25) / 100))
867
 
868
  # 20250506 pftq: Modified process to pass clean frame count, etc from video_encode
869
  @spaces.GPU(duration=get_duration_video)
870
  def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
871
- global stream, high_vram, input_video_debug_value, prompt_debug_value, total_second_length_debug_value
872
 
873
  if torch.cuda.device_count() == 0:
874
  gr.Warning('Set this space to GPU config to make it work.')
875
  yield gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
876
  return
877
 
878
- if input_video_debug_value is not None or prompt_debug_value is not None or total_second_length_debug_value is not None:
879
- input_video = input_video_debug_value
880
- prompt = prompt_debug_value
881
- total_second_length = total_second_length_debug_value
882
- input_video_debug_value = prompt_debug_value = total_second_length_debug_value = None
883
-
884
  if randomize_seed:
885
  seed = random.randint(0, np.iinfo(np.int32).max)
886
 
@@ -1064,12 +1039,6 @@ with block:
1064
  randomize_seed = gr.Checkbox(label='Randomize seed', value=True, info='If checked, the seed is always different')
1065
  seed = gr.Slider(label="Seed", minimum=0, maximum=np.iinfo(np.int32).max, step=1, randomize=True)
1066
 
1067
- with gr.Accordion("Debug", open=False):
1068
- input_image_debug = gr.Image(type="numpy", label="Image Debug", height=320)
1069
- input_video_debug = gr.Video(sources='upload', label="Input Video Debug", height=320)
1070
- prompt_debug = gr.Textbox(label="Prompt Debug", value='')
1071
- total_second_length_debug = gr.Slider(label="Additional Video Length to Generate (Seconds) Debug", minimum=1, maximum=120, value=1, step=0.1)
1072
-
1073
  with gr.Column():
1074
  preview_image = gr.Image(label="Next Latents", height=200, visible=False)
1075
  result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
@@ -1198,7 +1167,7 @@ with block:
1198
  True, # randomize_seed
1199
  42, # seed
1200
  1, # batch
1201
- 672, # resolution
1202
  1, # total_second_length
1203
  9, # latent_window_size
1204
  35, # steps
@@ -1230,7 +1199,7 @@ with block:
1230
  "Missing arm, unrealistic position, impossible contortion, blurred, blurry", # n_prompt
1231
  True, # randomize_seed
1232
  42, # seed
1233
- 672, # resolution
1234
  1, # total_second_length
1235
  9, # latent_window_size
1236
  25, # steps
@@ -1259,7 +1228,7 @@ with block:
1259
  True, # randomize_seed
1260
  42, # seed
1261
  1, # batch
1262
- 672, # resolution
1263
  1, # total_second_length
1264
  9, # latent_window_size
1265
  25, # steps
@@ -1291,44 +1260,12 @@ with block:
1291
  return [gr.update(visible = False), gr.update(visible = False), gr.update(visible = True), gr.update(visible = False), gr.update(visible = True), gr.update(visible = True), gr.update(visible = True), gr.update(visible = True), gr.update(visible = True)]
1292
 
1293
 
1294
- def handle_field_debug_change(input_image_debug_data, input_video_debug_data, prompt_debug_data, total_second_length_debug_data):
1295
- global input_image_debug_value, input_video_debug_value, prompt_debug_value, total_second_length_debug_value
1296
- input_image_debug_value = input_image_debug_data
1297
- input_video_debug_value = input_video_debug_data
1298
- prompt_debug_value = prompt_debug_data
1299
- total_second_length_debug_value = total_second_length_debug_data
1300
- return []
1301
-
1302
  generation_mode.change(
1303
  fn=handle_generation_mode_change,
1304
  inputs=[generation_mode],
1305
  outputs=[text_to_video_hint, input_image, input_video, start_button, start_button_video, no_resize, batch, num_clean_frames, vae_batch]
1306
  )
1307
 
1308
- input_image_debug.upload(
1309
- fn=handle_field_debug_change,
1310
- inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
1311
- outputs=[]
1312
- )
1313
-
1314
- input_video_debug.upload(
1315
- fn=handle_field_debug_change,
1316
- inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
1317
- outputs=[]
1318
- )
1319
-
1320
- prompt_debug.change(
1321
- fn=handle_field_debug_change,
1322
- inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
1323
- outputs=[]
1324
- )
1325
-
1326
- total_second_length_debug.change(
1327
- fn=handle_field_debug_change,
1328
- inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
1329
- outputs=[]
1330
- )
1331
-
1332
  # Update display when the page loads
1333
  block.load(
1334
  fn=handle_generation_mode_change, inputs = [
 
108
  outputs_folder = './outputs/'
109
  os.makedirs(outputs_folder, exist_ok=True)
110
 
 
 
111
  default_local_storage = {
112
  "generation-mode": "image",
113
  }
 
516
  return
517
 
518
  def get_duration(input_image, prompt, generation_mode, n_prompt, randomize_seed, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf):
 
 
 
 
519
  return total_second_length * 60 * (0.7 if use_teacache else 1.3) * (2**((resolution - 640) / 640)) * (1 + ((steps - 25) / 100))
520
 
521
 
 
537
  use_teacache=False,
538
  mp4_crf=16
539
  ):
540
+ global stream
541
 
542
  if torch.cuda.device_count() == 0:
543
  gr.Warning('Set this space to GPU config to make it work.')
544
  yield gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
545
  return
546
 
 
 
 
 
 
 
547
  if randomize_seed:
548
  seed = random.randint(0, np.iinfo(np.int32).max)
549
 
 
682
  history_pixels = None
683
  previous_video = None
684
 
 
 
 
 
685
  for section_index in range(total_latent_sections):
686
  if stream.input_queue.top() == 'end':
687
  stream.output_queue.push(('end', None))
 
844
  return
845
 
846
  def get_duration_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
 
 
 
847
  return total_second_length * 60 * (0.7 if use_teacache else 2) * (2**((resolution - 640) / 640)) * (1 + ((steps - 25) / 100))
848
 
849
  # 20250506 pftq: Modified process to pass clean frame count, etc from video_encode
850
  @spaces.GPU(duration=get_duration_video)
851
  def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
852
+ global stream, high_vram
853
 
854
  if torch.cuda.device_count() == 0:
855
  gr.Warning('Set this space to GPU config to make it work.')
856
  yield gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
857
  return
858
 
 
 
 
 
 
 
859
  if randomize_seed:
860
  seed = random.randint(0, np.iinfo(np.int32).max)
861
 
 
1039
  randomize_seed = gr.Checkbox(label='Randomize seed', value=True, info='If checked, the seed is always different')
1040
  seed = gr.Slider(label="Seed", minimum=0, maximum=np.iinfo(np.int32).max, step=1, randomize=True)
1041
 
 
 
 
 
 
 
1042
  with gr.Column():
1043
  preview_image = gr.Image(label="Next Latents", height=200, visible=False)
1044
  result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
 
1167
  True, # randomize_seed
1168
  42, # seed
1169
  1, # batch
1170
+ 640, # resolution
1171
  1, # total_second_length
1172
  9, # latent_window_size
1173
  35, # steps
 
1199
  "Missing arm, unrealistic position, impossible contortion, blurred, blurry", # n_prompt
1200
  True, # randomize_seed
1201
  42, # seed
1202
+ 640, # resolution
1203
  1, # total_second_length
1204
  9, # latent_window_size
1205
  25, # steps
 
1228
  True, # randomize_seed
1229
  42, # seed
1230
  1, # batch
1231
+ 640, # resolution
1232
  1, # total_second_length
1233
  9, # latent_window_size
1234
  25, # steps
 
1260
  return [gr.update(visible = False), gr.update(visible = False), gr.update(visible = True), gr.update(visible = False), gr.update(visible = True), gr.update(visible = True), gr.update(visible = True), gr.update(visible = True), gr.update(visible = True)]
1261
 
1262
 
 
 
 
 
 
 
 
 
1263
  generation_mode.change(
1264
  fn=handle_generation_mode_change,
1265
  inputs=[generation_mode],
1266
  outputs=[text_to_video_hint, input_image, input_video, start_button, start_button_video, no_resize, batch, num_clean_frames, vae_batch]
1267
  )
1268
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1269
  # Update display when the page loads
1270
  block.load(
1271
  fn=handle_generation_mode_change, inputs = [