MogensR committed on
Commit
c933bf3
·
verified ·
1 Parent(s): c957b26

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -1
app.py CHANGED
@@ -161,6 +161,8 @@ def generate_mask_from_video_first_frame(video_path, sam2_predictor):
161
  def stage1_create_transparent_video(input_file):
162
  """STAGE 1: Create transparent video using SAM2 + MatAnyone."""
163
 
 
 
164
  memory_info = get_memory_usage()
165
  if memory_info.get('gpu_free', 0) < 2.0:
166
  st.warning("⚠️ Low GPU memory detected. Processing may be slower.")
@@ -174,17 +176,26 @@ def update_progress(progress, message):
174
  progress = max(0, min(1, progress))
175
  progress_bar.progress(progress)
176
  status_text.text(f"Stage 1: {message} | GPU: {get_memory_usage().get('gpu_allocated', 0):.1f}GB")
 
177
 
178
  # Load models
179
  update_progress(0.05, "Loading SAM2 model...")
 
180
  sam2_predictor = load_sam2_predictor()
181
  if sam2_predictor is None:
 
 
182
  return None
 
183
 
184
  update_progress(0.1, "Loading MatAnyone model...")
 
185
  matanyone_processor = load_matanyone_processor()
186
  if matanyone_processor is None:
 
 
187
  return None
 
188
 
189
  # Process video to create transparent version
190
  with tempfile.TemporaryDirectory() as temp_dir:
@@ -242,12 +253,22 @@ def update_progress(progress, message):
242
 
243
  except Exception as e:
244
  logger.error(f"Error in Stage 1 processing: {str(e)}", exc_info=True)
245
- st.error(f"Stage 1 failed: {str(e)}")
 
 
 
 
 
 
 
 
246
  return None
247
  finally:
 
248
  if torch.cuda.is_available():
249
  torch.cuda.empty_cache()
250
  gc.collect()
 
251
 
252
  def create_transparent_mov(foreground_path, alpha_path, temp_dir):
253
  """Create a .mov file with alpha channel from foreground and alpha videos."""
@@ -433,6 +454,25 @@ def show_memory_info():
433
  f"Free: {memory_info['gpu_free']:.1f}GB")
434
  st.metric("RAM Usage", f"{memory_info['ram_used']:.1f}GB",
435
  f"Available: {memory_info['ram_available']:.1f}GB")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
436
  if st.button("🧹 Clear Cache", help="Free up memory by clearing model cache"):
437
  clear_model_cache()
438
  st.success("Cache cleared!")
 
161
  def stage1_create_transparent_video(input_file):
162
  """STAGE 1: Create transparent video using SAM2 + MatAnyone."""
163
 
164
+ logger.info("Starting Stage 1: Create transparent video")
165
+
166
  memory_info = get_memory_usage()
167
  if memory_info.get('gpu_free', 0) < 2.0:
168
  st.warning("⚠️ Low GPU memory detected. Processing may be slower.")
 
176
  progress = max(0, min(1, progress))
177
  progress_bar.progress(progress)
178
  status_text.text(f"Stage 1: {message} | GPU: {get_memory_usage().get('gpu_allocated', 0):.1f}GB")
179
+ logger.info(f"Stage 1 Progress: {progress:.2f} - {message}")
180
 
181
  # Load models
182
  update_progress(0.05, "Loading SAM2 model...")
183
+ logger.info("Attempting to load SAM2 predictor...")
184
  sam2_predictor = load_sam2_predictor()
185
  if sam2_predictor is None:
186
+ logger.error("SAM2 predictor failed to load")
187
+ st.error("❌ Failed to load SAM2 model")
188
  return None
189
+ logger.info("SAM2 predictor loaded successfully")
190
 
191
  update_progress(0.1, "Loading MatAnyone model...")
192
+ logger.info("Attempting to load MatAnyone processor...")
193
  matanyone_processor = load_matanyone_processor()
194
  if matanyone_processor is None:
195
+ logger.error("MatAnyone processor failed to load")
196
+ st.error("❌ Failed to load MatAnyone model")
197
  return None
198
+ logger.info("MatAnyone processor loaded successfully")
199
 
200
  # Process video to create transparent version
201
  with tempfile.TemporaryDirectory() as temp_dir:
 
253
 
254
  except Exception as e:
255
  logger.error(f"Error in Stage 1 processing: {str(e)}", exc_info=True)
256
+ st.error(f"Stage 1 failed: {str(e)}")
257
+
258
+ # Show additional debug info
259
+ try:
260
+ memory_info = get_memory_usage()
261
+ st.info(f"Memory at failure - GPU: {memory_info.get('gpu_allocated', 0):.1f}GB, RAM: {memory_info.get('ram_used', 0):.1f}GB")
262
+ except:
263
+ pass
264
+
265
  return None
266
  finally:
267
+ logger.info("Stage 1 cleanup starting...")
268
  if torch.cuda.is_available():
269
  torch.cuda.empty_cache()
270
  gc.collect()
271
+ logger.info("Stage 1 cleanup completed")
272
 
273
  def create_transparent_mov(foreground_path, alpha_path, temp_dir):
274
  """Create a .mov file with alpha channel from foreground and alpha videos."""
 
454
  f"Free: {memory_info['gpu_free']:.1f}GB")
455
  st.metric("RAM Usage", f"{memory_info['ram_used']:.1f}GB",
456
  f"Available: {memory_info['ram_available']:.1f}GB")
457
+
458
+ # Test model loading
459
+ if st.button("🧪 Test Models", help="Test if SAM2 and MatAnyone can load"):
460
+ with st.spinner("Testing model loading..."):
461
+ try:
462
+ sam2_test = load_sam2_predictor()
463
+ if sam2_test:
464
+ st.success("✅ SAM2 loads successfully")
465
+ else:
466
+ st.error("❌ SAM2 failed to load")
467
+
468
+ matanyone_test = load_matanyone_processor()
469
+ if matanyone_test:
470
+ st.success("✅ MatAnyone loads successfully")
471
+ else:
472
+ st.error("❌ MatAnyone failed to load")
473
+ except Exception as e:
474
+ st.error(f"Model test failed: {e}")
475
+
476
  if st.button("🧹 Clear Cache", help="Free up memory by clearing model cache"):
477
  clear_model_cache()
478
  st.success("Cache cleared!")