Fabrice-TIERCELIN committed on
Commit
faa16c5
·
verified ·
1 Parent(s): 523ea4c

enable_preview

Browse files
Files changed (1) hide show
  1. app.py +88 -70
app.py CHANGED
@@ -305,7 +305,7 @@ def set_mp4_comments_imageio_ffmpeg(input_file, comments):
305
  return False
306
 
307
  @torch.no_grad()
308
- def worker(input_image, prompts, n_prompt, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
309
  def encode_prompt(prompt, n_prompt):
310
  llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
311
 
@@ -398,23 +398,27 @@ def worker(input_image, prompts, n_prompt, seed, resolution, total_second_length
398
  history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2)
399
  total_generated_latent_frames = 1
400
 
401
- def callback(d):
402
- preview = d['denoised']
403
- preview = vae_decode_fake(preview)
404
-
405
- preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
406
- preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
407
-
408
- if stream.input_queue.top() == 'end':
409
- stream.output_queue.push(('end', None))
410
- raise KeyboardInterrupt('User ends the task.')
411
-
412
- current_step = d['i'] + 1
413
- percentage = int(100.0 * current_step / steps)
414
- hint = f'Sampling {current_step}/{steps}'
415
- desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30). The video is being extended now ...'
416
- stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
417
- return
 
 
 
 
418
 
419
  indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
420
  clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
@@ -494,13 +498,14 @@ def worker(input_image, prompts, n_prompt, seed, resolution, total_second_length
494
  if not high_vram:
495
  unload_complete_models()
496
 
497
- output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
 
498
 
499
- save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)
500
 
501
- print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
502
 
503
- stream.output_queue.push(('file', output_filename))
504
  except:
505
  traceback.print_exc()
506
 
@@ -512,12 +517,12 @@ def worker(input_image, prompts, n_prompt, seed, resolution, total_second_length
512
  stream.output_queue.push(('end', None))
513
  return
514
 
515
- def get_duration(input_image, prompt, generation_mode, n_prompt, randomize_seed, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
516
  global total_second_length_debug_value
517
 
518
  if total_second_length_debug_value is not None:
519
  return min(total_second_length_debug_value * 60 * 10, 600)
520
- return total_second_length * 60 * (0.7 if use_teacache else 1.3) * (2**((resolution - 640) / 640))
521
 
522
 
523
  @spaces.GPU(duration=get_duration)
@@ -534,6 +539,7 @@ def process(input_image, prompt,
534
  gs=10.0,
535
  rs=0.0,
536
  gpu_memory_preservation=6,
 
537
  use_teacache=False,
538
  mp4_crf=16
539
  ):
@@ -565,7 +571,7 @@ def process(input_image, prompt,
565
 
566
  stream = AsyncStream()
567
 
568
- async_run(worker, input_image, prompts, n_prompt, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)
569
 
570
  output_filename = None
571
 
@@ -587,7 +593,7 @@ def process(input_image, prompt,
587
  # 20250506 pftq: Modified worker to accept video input and clean frame count
588
  @spaces.GPU()
589
  @torch.no_grad()
590
- def worker_video(input_video, prompts, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
591
  def encode_prompt(prompt, n_prompt):
592
  llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
593
 
@@ -647,23 +653,27 @@ def worker_video(input_video, prompts, n_prompt, seed, batch, resolution, total_
647
  total_latent_sections = (total_second_length * fps) / (latent_window_size * 4)
648
  total_latent_sections = int(max(round(total_latent_sections), 1))
649
 
650
- def callback(d):
651
- preview = d['denoised']
652
- preview = vae_decode_fake(preview)
653
-
654
- preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
655
- preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
656
-
657
- if stream.input_queue.top() == 'end':
658
- stream.output_queue.push(('end', None))
659
- raise KeyboardInterrupt('User ends the task.')
660
-
661
- current_step = d['i'] + 1
662
- percentage = int(100.0 * current_step / steps)
663
- hint = f'Sampling {current_step}/{steps}'
664
- desc = f'Total frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / fps) :.2f} seconds (FPS-{fps}), Resolution: {height}px * {width}px, Seed: {seed}, Video {idx+1} of {batch}. The video is generating part {section_index+1} of {total_latent_sections}...'
665
- stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
666
- return
 
 
 
 
667
 
668
  for idx in range(batch):
669
  if batch > 1:
@@ -813,27 +823,28 @@ def worker_video(input_video, prompts, n_prompt, seed, batch, resolution, total_
813
  if not high_vram:
814
  unload_complete_models()
815
 
816
- output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
817
-
818
- # 20250506 pftq: Use input video FPS for output
819
- save_bcthw_as_mp4(history_pixels, output_filename, fps=fps, crf=mp4_crf)
820
- print(f"Latest video saved: {output_filename}")
821
- # 20250508 pftq: Save prompt to mp4 metadata comments
822
- set_mp4_comments_imageio_ffmpeg(output_filename, f"Prompt: {prompts} | Negative Prompt: {n_prompt}");
823
- print(f"Prompt saved to mp4 metadata comments: {output_filename}")
824
-
825
- # 20250506 pftq: Clean up previous partial files
826
- if previous_video is not None and os.path.exists(previous_video):
827
- try:
828
- os.remove(previous_video)
829
- print(f"Previous partial video deleted: {previous_video}")
830
- except Exception as e:
831
- print(f"Error deleting previous partial video {previous_video}: {e}")
832
- previous_video = output_filename
833
-
834
- print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
835
-
836
- stream.output_queue.push(('file', output_filename))
 
837
 
838
  seed = (seed + 1) % np.iinfo(np.int32).max
839
 
@@ -848,15 +859,15 @@ def worker_video(input_video, prompts, n_prompt, seed, batch, resolution, total_
848
  stream.output_queue.push(('end', None))
849
  return
850
 
851
- def get_duration_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
852
  global total_second_length_debug_value
853
  if total_second_length_debug_value is not None:
854
  return min(total_second_length_debug_value * 60 * 10, 600)
855
- return total_second_length * 60 * (0.7 if use_teacache else 2) * (2**((resolution - 640) / 640))
856
 
857
  # 20250506 pftq: Modified process to pass clean frame count, etc from video_encode
858
  @spaces.GPU(duration=get_duration_video)
859
- def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
860
  global stream, high_vram, input_video_debug_value, prompt_debug_value, total_second_length_debug_value
861
 
862
  if torch.cuda.device_count() == 0:
@@ -896,7 +907,7 @@ def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, re
896
  stream = AsyncStream()
897
 
898
  # 20250506 pftq: Pass num_clean_frames, vae_batch, etc
899
- async_run(worker_video, input_video, prompts, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch)
900
 
901
  output_filename = None
902
 
@@ -1007,6 +1018,7 @@ with block:
1007
  end_button = gr.Button(elem_id="end-button", value="End Generation", variant="stop", interactive=False)
1008
 
1009
  with gr.Accordion("Advanced settings", open=False):
 
1010
  use_teacache = gr.Checkbox(label='Use TeaCache', value=False, info='Faster speed, but often makes hands and fingers slightly worse.')
1011
 
1012
  n_prompt = gr.Textbox(label="Negative Prompt", value="Missing arm, unrealistic position, impossible contortion, blurred, blurry", info='Requires using normal CFG (undistilled) instead of Distilled (set Distilled=1 and CFG > 1).')
@@ -1065,8 +1077,8 @@ with block:
1065
  progress_bar = gr.HTML('', elem_classes='no-generating-animation')
1066
 
1067
  # 20250506 pftq: Updated inputs to include num_clean_frames
1068
- ips = [input_image, final_prompt, generation_mode, n_prompt, randomize_seed, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
1069
- ips_video = [input_video, final_prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch]
1070
 
1071
  def save_preferences(preferences, value):
1072
  preferences["generation-mode"] = value
@@ -1123,6 +1135,7 @@ with block:
1123
  10.0, # gs
1124
  0.0, # rs
1125
  6, # gpu_memory_preservation
 
1126
  False, # use_teacache
1127
  16 # mp4_crf
1128
  ],
@@ -1141,6 +1154,7 @@ with block:
1141
  10.0, # gs
1142
  0.0, # rs
1143
  6, # gpu_memory_preservation
 
1144
  False, # use_teacache
1145
  16 # mp4_crf
1146
  ],
@@ -1170,6 +1184,7 @@ with block:
1170
  10.0, # gs
1171
  0.0, # rs
1172
  6, # gpu_memory_preservation
 
1173
  False, # use_teacache
1174
  False, # no_resize
1175
  16, # mp4_crf
@@ -1191,6 +1206,7 @@ with block:
1191
  10.0, # gs
1192
  0.0, # rs
1193
  6, # gpu_memory_preservation
 
1194
  False, # use_teacache
1195
  False, # no_resize
1196
  16, # mp4_crf
@@ -1222,6 +1238,7 @@ with block:
1222
  10.0, # gs
1223
  0.0, # rs
1224
  6, # gpu_memory_preservation
 
1225
  False, # use_teacache
1226
  16 # mp4_crf
1227
  ]
@@ -1250,6 +1267,7 @@ with block:
1250
  10.0, # gs
1251
  0.0, # rs
1252
  6, # gpu_memory_preservation
 
1253
  False, # use_teacache
1254
  False, # no_resize
1255
  16, # mp4_crf
 
305
  return False
306
 
307
  @torch.no_grad()
308
+ def worker(input_image, prompts, n_prompt, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf):
309
  def encode_prompt(prompt, n_prompt):
310
  llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
311
 
 
398
  history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2)
399
  total_generated_latent_frames = 1
400
 
401
+ if enable_preview:
402
+ def callback(d):
403
+ preview = d['denoised']
404
+ preview = vae_decode_fake(preview)
405
+
406
+ preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
407
+ preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
408
+
409
+ if stream.input_queue.top() == 'end':
410
+ stream.output_queue.push(('end', None))
411
+ raise KeyboardInterrupt('User ends the task.')
412
+
413
+ current_step = d['i'] + 1
414
+ percentage = int(100.0 * current_step / steps)
415
+ hint = f'Sampling {current_step}/{steps}'
416
+ desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30), Resolution: {height}px * {width}px. The video is being extended now ...'
417
+ stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
418
+ return
419
+ else:
420
+ def callback(d):
421
+ return
422
 
423
  indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
424
  clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
 
498
  if not high_vram:
499
  unload_complete_models()
500
 
501
+ if enable_preview or section_index == total_latent_sections - 1:
502
+ output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
503
 
504
+ save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)
505
 
506
+ print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
507
 
508
+ stream.output_queue.push(('file', output_filename))
509
  except:
510
  traceback.print_exc()
511
 
 
517
  stream.output_queue.push(('end', None))
518
  return
519
 
520
+ def get_duration(input_image, prompt, generation_mode, n_prompt, randomize_seed, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf):
521
  global total_second_length_debug_value
522
 
523
  if total_second_length_debug_value is not None:
524
  return min(total_second_length_debug_value * 60 * 10, 600)
525
+ return total_second_length * 60 * (0.7 if use_teacache else 1.3) * (2**((resolution - 640) / 640)) * (1 + ((steps - 25) / 100))
526
 
527
 
528
  @spaces.GPU(duration=get_duration)
 
539
  gs=10.0,
540
  rs=0.0,
541
  gpu_memory_preservation=6,
542
+ enable_preview=True,
543
  use_teacache=False,
544
  mp4_crf=16
545
  ):
 
571
 
572
  stream = AsyncStream()
573
 
574
+ async_run(worker, input_image, prompts, n_prompt, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf)
575
 
576
  output_filename = None
577
 
 
593
  # 20250506 pftq: Modified worker to accept video input and clean frame count
594
  @spaces.GPU()
595
  @torch.no_grad()
596
+ def worker_video(input_video, prompts, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
597
  def encode_prompt(prompt, n_prompt):
598
  llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
599
 
 
653
  total_latent_sections = (total_second_length * fps) / (latent_window_size * 4)
654
  total_latent_sections = int(max(round(total_latent_sections), 1))
655
 
656
+ if enable_preview:
657
+ def callback(d):
658
+ preview = d['denoised']
659
+ preview = vae_decode_fake(preview)
660
+
661
+ preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
662
+ preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
663
+
664
+ if stream.input_queue.top() == 'end':
665
+ stream.output_queue.push(('end', None))
666
+ raise KeyboardInterrupt('User ends the task.')
667
+
668
+ current_step = d['i'] + 1
669
+ percentage = int(100.0 * current_step / steps)
670
+ hint = f'Sampling {current_step}/{steps}'
671
+ desc = f'Total frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / fps) :.2f} seconds (FPS-{fps}), Resolution: {height}px * {width}px, Seed: {seed}, Video {idx+1} of {batch}. The video is generating part {section_index+1} of {total_latent_sections}...'
672
+ stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
673
+ return
674
+ else:
675
+ def callback(d):
676
+ return
677
 
678
  for idx in range(batch):
679
  if batch > 1:
 
823
  if not high_vram:
824
  unload_complete_models()
825
 
826
+ if enable_preview or section_index == total_latent_sections - 1:
827
+ output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
828
+
829
+ # 20250506 pftq: Use input video FPS for output
830
+ save_bcthw_as_mp4(history_pixels, output_filename, fps=fps, crf=mp4_crf)
831
+ print(f"Latest video saved: {output_filename}")
832
+ # 20250508 pftq: Save prompt to mp4 metadata comments
833
+ set_mp4_comments_imageio_ffmpeg(output_filename, f"Prompt: {prompts} | Negative Prompt: {n_prompt}");
834
+ print(f"Prompt saved to mp4 metadata comments: {output_filename}")
835
+
836
+ # 20250506 pftq: Clean up previous partial files
837
+ if previous_video is not None and os.path.exists(previous_video):
838
+ try:
839
+ os.remove(previous_video)
840
+ print(f"Previous partial video deleted: {previous_video}")
841
+ except Exception as e:
842
+ print(f"Error deleting previous partial video {previous_video}: {e}")
843
+ previous_video = output_filename
844
+
845
+ print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
846
+
847
+ stream.output_queue.push(('file', output_filename))
848
 
849
  seed = (seed + 1) % np.iinfo(np.int32).max
850
 
 
859
  stream.output_queue.push(('end', None))
860
  return
861
 
862
+ def get_duration_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
863
  global total_second_length_debug_value
864
  if total_second_length_debug_value is not None:
865
  return min(total_second_length_debug_value * 60 * 10, 600)
866
+ return total_second_length * 60 * (0.7 if use_teacache else 2) * (2**((resolution - 640) / 640)) * (1 + ((steps - 25) / 100))
867
 
868
  # 20250506 pftq: Modified process to pass clean frame count, etc from video_encode
869
  @spaces.GPU(duration=get_duration_video)
870
+ def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
871
  global stream, high_vram, input_video_debug_value, prompt_debug_value, total_second_length_debug_value
872
 
873
  if torch.cuda.device_count() == 0:
 
907
  stream = AsyncStream()
908
 
909
  # 20250506 pftq: Pass num_clean_frames, vae_batch, etc
910
+ async_run(worker_video, input_video, prompts, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch)
911
 
912
  output_filename = None
913
 
 
1018
  end_button = gr.Button(elem_id="end-button", value="End Generation", variant="stop", interactive=False)
1019
 
1020
  with gr.Accordion("Advanced settings", open=False):
1021
+ enable_preview = gr.Checkbox(label='Enable preview', value=True, info='Display a preview around each second generated but it costs 2 sec. for each second generated.')
1022
  use_teacache = gr.Checkbox(label='Use TeaCache', value=False, info='Faster speed, but often makes hands and fingers slightly worse.')
1023
 
1024
  n_prompt = gr.Textbox(label="Negative Prompt", value="Missing arm, unrealistic position, impossible contortion, blurred, blurry", info='Requires using normal CFG (undistilled) instead of Distilled (set Distilled=1 and CFG > 1).')
 
1077
  progress_bar = gr.HTML('', elem_classes='no-generating-animation')
1078
 
1079
  # 20250506 pftq: Updated inputs to include num_clean_frames
1080
+ ips = [input_image, final_prompt, generation_mode, n_prompt, randomize_seed, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf]
1081
+ ips_video = [input_video, final_prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch]
1082
 
1083
  def save_preferences(preferences, value):
1084
  preferences["generation-mode"] = value
 
1135
  10.0, # gs
1136
  0.0, # rs
1137
  6, # gpu_memory_preservation
1138
+ False, # enable_preview
1139
  False, # use_teacache
1140
  16 # mp4_crf
1141
  ],
 
1154
  10.0, # gs
1155
  0.0, # rs
1156
  6, # gpu_memory_preservation
1157
+ False, # enable_preview
1158
  False, # use_teacache
1159
  16 # mp4_crf
1160
  ],
 
1184
  10.0, # gs
1185
  0.0, # rs
1186
  6, # gpu_memory_preservation
1187
+ False, # enable_preview
1188
  False, # use_teacache
1189
  False, # no_resize
1190
  16, # mp4_crf
 
1206
  10.0, # gs
1207
  0.0, # rs
1208
  6, # gpu_memory_preservation
1209
+ False, # enable_preview
1210
  False, # use_teacache
1211
  False, # no_resize
1212
  16, # mp4_crf
 
1238
  10.0, # gs
1239
  0.0, # rs
1240
  6, # gpu_memory_preservation
1241
+ False, # enable_preview
1242
  False, # use_teacache
1243
  16 # mp4_crf
1244
  ]
 
1267
  10.0, # gs
1268
  0.0, # rs
1269
  6, # gpu_memory_preservation
1270
+ False, # enable_preview
1271
  False, # use_teacache
1272
  False, # no_resize
1273
  16, # mp4_crf