garyuzair committed
Commit dd0a729 · verified · 1 Parent(s): b4d330b

Upload 6 files

Files changed (1):
app.py +16 -11
app.py CHANGED
@@ -373,11 +373,11 @@ def main():
         gc.collect()
         torch.cuda.empty_cache() if torch.cuda.is_available() else None

-    # Apply VRAM optimization settings
+    # Store VRAM optimization settings to apply after initialization
+    apply_vram_optimization = vram_optimization
+
+    # Adjust parameters for VRAM optimization if enabled
     if vram_optimization:
-        # Set image generator to use VRAM optimization
-        image_generator.set_vram_optimization(True)
-
         # Set lower inference steps when VRAM optimization is enabled
         if inference_steps > 25:
             inference_steps = 25
@@ -400,6 +400,10 @@ def main():
     animator.set_aspect_ratio(selected_aspect_ratio)
     video_creator.set_aspect_ratio(selected_aspect_ratio)

+    # Apply VRAM optimization if enabled
+    if apply_vram_optimization:
+        image_generator.set_vram_optimization(True)
+
     # Set maximum segment duration
     transcriber.set_max_segment_duration(max_segment_duration)
     video_creator.set_max_segment_duration(max_segment_duration)
@@ -452,7 +456,7 @@ def main():
             transcriptions.append("")

     # Force garbage collection after transcription
-    if memory_optimization:
+    if memory_optimization or apply_vram_optimization:
         gc.collect()
         torch.cuda.empty_cache() if torch.cuda.is_available() else None

@@ -502,7 +506,7 @@ def main():
     status_message.markdown("🎨 **Creating images...**")

     # For memory optimization, process in smaller batches even with parallel processing
-    if memory_optimization:
+    if memory_optimization or apply_vram_optimization:
         batch_size = 2  # Process only 2 images at a time to conserve memory
         images = []

@@ -551,7 +555,7 @@ def main():
             st.image(img_path, caption=f"Image {i+1}", use_column_width=True)

     # Force garbage collection after image generation
-    if memory_optimization:
+    if memory_optimization or apply_vram_optimization:
         gc.collect()
         torch.cuda.empty_cache() if torch.cuda.is_available() else None

@@ -560,7 +564,7 @@ def main():
     status_message.markdown("✨ **Adding animations...**")

     # For memory optimization, process in smaller batches
-    if memory_optimization:
+    if memory_optimization or apply_vram_optimization:
         batch_size = 3  # Process only 3 animations at a time
         animated_frames = []

@@ -613,7 +617,7 @@ def main():
     progress_bar.progress(80)

     # Force garbage collection before video creation
-    if memory_optimization:
+    if memory_optimization or apply_vram_optimization:
         gc.collect()
         torch.cuda.empty_cache() if torch.cuda.is_available() else None

@@ -643,7 +647,7 @@ def main():
     output_video = video_creator.optimize_video(
         output_video,
         bitrate=bitrate,
-        threads=2 if memory_optimization else max_workers  # Use fewer threads for memory optimization
+        threads=2 if memory_optimization or apply_vram_optimization else max_workers  # Use fewer threads for optimization
     )

     # Cache the result if caching is enabled
@@ -678,6 +682,7 @@ def main():
     - Number of Segments: {len(audio_segments)}
     - Parallel Processing: {'Enabled' if parallel_processing else 'Disabled'}
     - Memory Optimization: {'Enabled' if memory_optimization else 'Disabled'}
+    - VRAM Optimization: {'Enabled' if apply_vram_optimization else 'Disabled'}
     - Workers: {max_workers}
     - Image Size: {actual_image_size[0]}x{actual_image_size[1]}
     - Inference Steps: {inference_steps}
@@ -694,7 +699,7 @@ def main():
         pass

     # Final garbage collection
-    if memory_optimization:
+    if memory_optimization or apply_vram_optimization:
         gc.collect()
         torch.cuda.empty_cache() if torch.cuda.is_available() else None
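A note on the cleanup idiom this diff repeats: `torch.cuda.empty_cache() if torch.cuda.is_available() else None` is a conditional expression used only for its side effect; a plain `if` behaves identically and reads better. A minimal sketch collapsing the pattern into a helper, assuming PyTorch is installed — the name `free_gpu_memory` is hypothetical, not part of app.py:

```python
import gc

import torch


def free_gpu_memory() -> None:
    # Hypothetical helper for the cleanup pattern repeated in app.py:
    # run Python garbage collection, then return cached (unused) CUDA
    # blocks to the driver when a GPU is present. empty_cache() does not
    # free tensors that are still referenced; it only trims PyTorch's cache.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
```

Each `if memory_optimization or apply_vram_optimization:` block above could then make a single `free_gpu_memory()` call.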
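The `batch_size = 2` (images) and `batch_size = 3` (animations) settings imply a chunked processing loop that frees memory between chunks, trading throughput for lower peak VRAM. The diff does not show the loop body, so the sketch below is illustrative only; `prompts` and `generate` are stand-ins for the app's actual objects:

```python
from typing import Callable, List, Sequence


def generate_in_batches(
    prompts: Sequence[str],
    generate: Callable[[str], object],
    batch_size: int = 2,  # matches the diff: two images in flight at a time
) -> List[object]:
    # Illustrative sketch: run a small batch, then release memory before
    # starting the next one, so peak usage scales with batch_size rather
    # than with len(prompts).
    images: List[object] = []
    for start in range(0, len(prompts), batch_size):
        for prompt in prompts[start:start + batch_size]:
            images.append(generate(prompt))
        free_gpu_memory()  # hypothetical helper from the sketch above
    return images
```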