garyuzair committed on
Commit
9855282
·
verified ·
1 Parent(s): 350d7aa

Update src/app_hf_space_optimized.py

Browse files
Files changed (1) hide show
  1. src/app_hf_space_optimized.py +23 -12
src/app_hf_space_optimized.py CHANGED
@@ -22,7 +22,15 @@ import shutil
22
  import traceback
23
  import psutil # For memory stats
24
 
25
- # Use a more explicit title indicating vertical format
 
 
 
 
 
 
 
 
26
  st.set_page_config(layout="wide", page_title="POV Vertical Video Gen (HF Space)")
27
 
28
  # --- Configuration ---
@@ -48,16 +56,19 @@ def display_memory_usage():
48
  """Displays CPU and GPU memory usage in the sidebar."""
49
  try:
50
  process = psutil.Process(os.getpid())
51
- cpu_mem = process.memory_info().rss / (1024 * 1024) # MB
52
- gpu_mem_info = "N/A"
 
 
 
53
  if torch.cuda.is_available():
54
  # Get current and peak allocated memory
55
  allocated = torch.cuda.memory_allocated(0) / (1024 * 1024) # MB
56
- # reserved = torch.cuda.memory_reserved(0) / (1024 * 1024) # MB # Reserved is less critical than allocated/peak
57
  peak_allocated = torch.cuda.max_memory_allocated(0) / (1024 * 1024) # MB
58
  total = torch.cuda.get_device_properties(0).total_memory / (1024 * 1024) # MB
59
- gpu_mem_info = f"Alloc: {allocated:.0f}MB | Peak Alloc: {peak_allocated:.0f}MB | Total: {total:.0f}MB"
60
- mem_info_placeholder.info(f"🧠 CPU Mem: {cpu_mem:.0f} MB\n⚡ GPU Mem: {gpu_mem_info}")
 
61
  except Exception as e:
62
  mem_info_placeholder.warning(f"Could not get memory info: {e}")
63
 
@@ -602,7 +613,7 @@ def run_compose_step_ffmpeg(video_results, audio_results, temp_dir, title="final
602
  # Specify the input base directory for relative paths using -i
603
  (
604
  ffmpeg
605
- .input(f"concat:{concat_video_list_path}", format='concat', safe=0, fflags='+igndts', enable_cuda=True) # Use concat protocol, add cuda if available
606
  .output(long_video_path, c='copy') # Use stream copy for speed - assumes inputs are compatible codecs/formats
607
  .global_args('-hide_banner', '-loglevel', 'error') # Suppress verbose output, show only errors
608
  # Run the command from the temporary directory for relative paths to work
@@ -738,7 +749,7 @@ with st.sidebar:
738
 
739
  # Add warning about scene count on CPU
740
  if device == "cpu" and num_scenes_req > 2: # Arbitrary threshold, but 3+ scenes are very risky on CPU
741
- st.warning(f"⚠️ Generating {num_scenes_req} scenes on CPU is highly likely to fail due to resource limits.")
742
 
743
 
744
  # Display target dimensions and duration clearly
@@ -760,14 +771,14 @@ with st.sidebar:
760
  st.session_state.num_scenes = num_scenes_req # Store the user-requested number of scenes
761
  cleanup_temp_dir() # Clean old files before starting a new run
762
  get_temp_dir() # Ensure a new temp dir path is set for this run
763
- st.experimental_rerun() # Trigger a rerun to enter the generation loop
764
 
765
  st.header("⚠️ Actions")
766
  # Reset workflow button - disabled if generation is in progress
767
  if st.button("🔁 Reset Workflow", disabled=st.session_state.generation_in_progress):
768
  init_state() # Reset all session state
769
  cleanup_temp_dir() # Also clean files on reset
770
- st.experimental_rerun() # Rerun to update UI state and exit generation loop
771
 
772
  # Clean temp files button - disabled if generation is in progress
773
  cleanup_button_help = f"Removes files in: {st.session_state.get('temp_dir_path', 'N/A')}"
@@ -875,7 +886,7 @@ if st.session_state.generation_in_progress:
875
  # The progress bar update to 100% for 'done' is handled above based on state
876
  # Trigger a rerun. Streamlit will reload the script, and the logic will continue
877
  # from the new st.session_state.current_step.
878
- st.experimental_rerun()
879
 
880
  # If next_step is the same as current_step, it implies an error occurred *within* the step's
881
  # execution that set next_step to "error", and the logic above decided not to rerun.
@@ -893,7 +904,7 @@ if st.session_state.generation_in_progress:
893
  st.session_state.current_step = "error" # Set state to error
894
  st.session_state.generation_in_progress = False # Stop generation
895
  progress_bar.progress(0) # Reset progress bar on error
896
- st.experimental_rerun() # Rerun to show the error state UI and stop execution flow
897
 
898
 
899
  # --- Display Final Output ---
 
22
  import traceback
23
  import psutil # For memory stats
24
 
25
+ # --- Set environment variable for Hugging Face cache ---
26
+ # This tells Hugging Face libraries where to store downloaded models
27
+ # /tmp is typically writable on HF Spaces
28
+ HF_CACHE_DIR = "/tmp/hf_cache"
29
+ os.environ['HUGGINGFACE_HUB_CACHE'] = HF_CACHE_DIR
30
+ # Create the cache directory if it doesn't exist
31
+ os.makedirs(HF_CACHE_DIR, exist_ok=True)
32
+
33
+
34
  st.set_page_config(layout="wide", page_title="POV Vertical Video Gen (HF Space)")
35
 
36
  # --- Configuration ---
 
56
  """Displays CPU and GPU memory usage in the sidebar."""
57
  try:
58
  process = psutil.Process(os.getpid())
59
+ cpu_mem = process.memory_info().rss / (1024 * 1024) # MB
60
+ cpu_mem_percent = (process.memory_info().rss / (1600 * 1024 * 1024)) * 100 # Percentage of 1.6GB
61
+ cpu_text = f"🧠 CPU Mem: {cpu_mem:.0f} MB ({cpu_mem_percent:.0f}%)"
62
+
63
+ gpu_mem_info = "N/A (No GPU)"
64
  if torch.cuda.is_available():
65
  # Get current and peak allocated memory
66
  allocated = torch.cuda.memory_allocated(0) / (1024 * 1024) # MB
 
67
  peak_allocated = torch.cuda.max_memory_allocated(0) / (1024 * 1024) # MB
68
  total = torch.cuda.get_device_properties(0).total_memory / (1024 * 1024) # MB
69
+ gpu_mem_info = f"⚡ GPU Mem: Alloc: {allocated:.0f}MB | Peak Alloc: {peak_allocated:.0f}MB | Total: {total:.0f}MB"
70
+
71
+ mem_info_placeholder.info(f"{cpu_text}\n{gpu_mem_info}")
72
  except Exception as e:
73
  mem_info_placeholder.warning(f"Could not get memory info: {e}")
74
 
 
613
  # Specify the input base directory for relative paths using -i
614
  (
615
  ffmpeg
616
+ .input(concat_video_list_path, format='concat', safe=0, fflags='+igndts') # concat demuxer reads the list file directly; no 'concat:' protocol prefix
617
  .output(long_video_path, c='copy') # Use stream copy for speed - assumes inputs are compatible codecs/formats
618
  .global_args('-hide_banner', '-loglevel', 'error') # Suppress verbose output, show only errors
619
  # Run the command from the temporary directory for relative paths to work
 
749
 
750
  # Add warning about scene count on CPU
751
  if device == "cpu" and num_scenes_req > 2: # Arbitrary threshold, but 3+ scenes are very risky on CPU
752
+ st.warning(f"⚠️ Requesting {num_scenes_req} scenes on CPU is highly likely to exceed resource limits and fail.")
753
 
754
 
755
  # Display target dimensions and duration clearly
 
771
  st.session_state.num_scenes = num_scenes_req # Store the user-requested number of scenes
772
  cleanup_temp_dir() # Clean old files before starting a new run
773
  get_temp_dir() # Ensure a new temp dir path is set for this run
774
+ st.rerun() # *** UPDATED *** Trigger a rerun to enter the generation loop
775
 
776
  st.header("⚠️ Actions")
777
  # Reset workflow button - disabled if generation is in progress
778
  if st.button("🔁 Reset Workflow", disabled=st.session_state.generation_in_progress):
779
  init_state() # Reset all session state
780
  cleanup_temp_dir() # Also clean files on reset
781
+ st.rerun() # *** UPDATED *** Rerun to update UI state and exit generation loop
782
 
783
  # Clean temp files button - disabled if generation is in progress
784
  cleanup_button_help = f"Removes files in: {st.session_state.get('temp_dir_path', 'N/A')}"
 
886
  # The progress bar update to 100% for 'done' is handled above based on state
887
  # Trigger a rerun. Streamlit will reload the script, and the logic will continue
888
  # from the new st.session_state.current_step.
889
+ st.rerun() # *** UPDATED *** Trigger a rerun
890
 
891
  # If next_step is the same as current_step, it implies an error occurred *within* the step's
892
  # execution that set next_step to "error", and the logic above decided not to rerun.
 
904
  st.session_state.current_step = "error" # Set state to error
905
  st.session_state.generation_in_progress = False # Stop generation
906
  progress_bar.progress(0) # Reset progress bar on error
907
+ st.rerun() # *** UPDATED *** Rerun to show the error state UI and stop execution flow
908
 
909
 
910
  # --- Display Final Output ---