MogensR committed on
Commit
ce188b1
Β·
verified Β·
1 Parent(s): cf67533

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +271 -261
app.py CHANGED
@@ -1,295 +1,305 @@
1
- #!/usr/bin/env python3
2
- """
3
- MyAvatar Video Background Replacer - Streamlit UI
4
- Main interface for two-stage video processing pipeline
5
- """
6
-
7
  import streamlit as st
8
- import sys
 
 
 
 
 
9
  from pathlib import Path
10
- from PIL import Image
11
-
12
- # Add project root to path
13
- sys.path.append(str(Path(__file__).parent.absolute()))
14
-
15
- # Import processing modules
16
- from model_loaders import load_sam2_predictor, load_matanyone_processor, clear_model_cache, get_memory_usage
17
- from video_pipeline import stage1_create_transparent_video, stage2_composite_background
18
 
19
- # Persistent temp dir
20
- TMP_DIR = Path("tmp")
21
- TMP_DIR.mkdir(parents=True, exist_ok=True)
22
-
23
- # Page config
24
- st.set_page_config(
25
- page_title="MyAvatar - Video Background Replacer",
26
- page_icon="πŸŽ₯",
27
- layout="wide",
28
- initial_sidebar_state="expanded"
29
  )
 
30
 
31
- # Styling
32
- def add_logo():
33
- st.markdown(
34
- """
35
- <style>
36
- .main .block-container { padding-top: 2rem; padding-bottom: 2rem; }
37
- .stButton>button { width: 100%; background-color: #4CAF50; color: white; font-weight: bold; transition: all 0.3s; }
38
- .stButton>button:hover { background-color: #45a049; }
39
- .stProgress > div > div > div > div { background-color: #4CAF50; }
40
- .stage-indicator { background: linear-gradient(90deg, #4CAF50, #45a049); color: white; padding: 10px; border-radius: 5px; margin: 10px 0; text-align: center; font-weight: bold; }
41
- </style>
42
- """,
43
- unsafe_allow_html=True
44
- )
45
 
46
- def show_memory_info():
47
- """Display memory usage in sidebar with model testing."""
48
- memory_info = get_memory_usage()
49
- with st.sidebar:
50
- st.markdown("### 🧠 Memory Usage")
51
- if 'gpu_allocated' in memory_info:
52
- st.metric("GPU Memory", f"{memory_info['gpu_allocated']:.1f}GB",
53
- f"Free: {memory_info['gpu_free']:.1f}GB")
54
- st.metric("RAM Usage", f"{memory_info['ram_used']:.1f}GB",
55
- f"Available: {memory_info['ram_available']:.1f}GB")
56
-
57
- if st.button("πŸ§ͺ Test Models", help="Test if SAM2 and MatAnyone can load"):
58
- with st.spinner("Testing model loading..."):
59
- try:
60
- sam2_test = load_sam2_predictor()
61
- st.success("βœ… SAM2 loads successfully") if sam2_test else st.error("❌ SAM2 failed to load")
62
-
63
- matanyone_test = load_matanyone_processor()
64
- st.success("βœ… MatAnyone loads successfully") if matanyone_test else st.error("❌ MatAnyone failed to load")
65
- except Exception as e:
66
- st.error(f"Model test failed: {e}")
67
-
68
- if st.button("🧹 Clear Cache", help="Free up memory by clearing model cache"):
69
- clear_model_cache()
70
- st.success("Cache cleared!")
71
- st.experimental_rerun()
72
 
73
- def initialize_session_state():
74
- """Initialize all session state variables."""
75
- defaults = {
76
- 'uploaded_video': None,
77
- 'bg_image': None,
78
- 'bg_image_info': None,
79
- 'bg_color': "#00FF00",
80
- 'bg_type': "image",
81
- 'transparent_video_path': None,
82
- 'final_video_path': None,
83
- 'processing_stage1': False,
84
- 'processing_stage2': False
85
- }
86
- for key, value in defaults.items():
87
- if key not in st.session_state:
88
- st.session_state[key] = value
89
 
90
- def handle_video_upload():
91
- """Handle video file upload."""
92
- uploaded = st.file_uploader(
93
- "πŸ“Ή Upload Video",
94
- type=["mp4", "mov", "avi", "mkv"],
95
- key="video_uploader",
96
- help="Recommended: Videos under 30 seconds for faster processing"
97
- )
98
- if uploaded is not None:
99
- file_size_mb = uploaded.size / (1024 * 1024)
100
- if file_size_mb > 100:
101
- st.warning(f"⚠️ Large file detected ({file_size_mb:.1f}MB). Processing may take longer.")
102
- st.session_state.uploaded_video = uploaded
103
- st.session_state.transparent_video_path = None
104
- st.session_state.final_video_path = None
105
-
106
- def show_video_preview():
107
- """Display uploaded video preview."""
108
- st.markdown("### Video Preview")
109
- if st.session_state.uploaded_video is not None:
110
- video_bytes = st.session_state.uploaded_video.getvalue()
111
- st.video(video_bytes)
112
- st.session_state.uploaded_video.seek(0)
113
-
114
- def handle_background_selection():
115
- """Handle background type selection."""
116
- st.markdown("### Background Options")
117
- bg_type = st.radio("Select Background Type:", ["Image", "Color"], horizontal=True, key="bg_type_radio")
118
- st.session_state.bg_type = bg_type.lower()
119
 
120
- if bg_type == "Image":
121
- handle_image_background()
122
- elif bg_type == "Color":
123
- handle_color_background()
124
-
125
- def handle_image_background():
126
- """Handle image background upload and preview."""
127
- bg_image = st.file_uploader("πŸ–ΌοΈ Upload Background Image", type=["jpg", "png", "jpeg"],
128
- key="bg_image_uploader", help="Recommended: Images under 5MB")
129
 
130
- if bg_image is not None:
131
- image_size_mb = bg_image.size / (1024 * 1024)
132
- if image_size_mb > 10:
133
- st.warning(f"⚠️ Large image ({image_size_mb:.1f}MB). Consider resizing.")
 
 
 
 
 
 
 
 
 
 
 
134
 
135
- current_file_info = f"{bg_image.name}_{bg_image.size}"
136
- if st.session_state.bg_image_info != current_file_info:
137
- st.session_state.bg_image = Image.open(bg_image)
138
- st.session_state.bg_image_info = current_file_info
139
- st.session_state.final_video_path = None
140
 
141
- if st.session_state.bg_image is not None:
142
- st.image(st.session_state.bg_image, caption="Selected Background", use_container_width=True)
143
- else:
144
- st.session_state.bg_image = None
145
- st.session_state.bg_image_info = None
146
-
147
- def handle_color_background():
148
- """Handle solid color background selection."""
149
- st.markdown("#### Select a Color")
150
- old_color = st.session_state.get('bg_color', "#00FF00")
151
 
152
- color_presets = {
153
- "Pure White": "#FFFFFF",
154
- "Pure Black": "#000000",
155
- "Light Gray": "#F5F5F5",
156
- "Professional Blue": "#0078D4",
157
- "Corporate Green": "#107C10",
158
- "Custom": old_color
159
- }
 
 
160
 
161
- cols = st.columns(3)
162
- for i, (name, color) in enumerate(color_presets.items()):
163
- with cols[i % 3]:
164
- if name == "Custom":
165
- new_color = st.color_picker("Custom Color", old_color, key="custom_color_picker")
166
- if new_color != old_color:
167
- st.session_state.bg_color = new_color
168
- st.session_state.final_video_path = None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
169
  else:
170
- if st.button(name, key=f"color_{name}", use_container_width=True):
171
- st.session_state.bg_color = color
172
- st.session_state.final_video_path = None
173
- st.markdown(f'<div style="background-color:{color}; height:30px; border-radius:4px; margin-top:-10px;"></div>',
174
- unsafe_allow_html=True)
175
-
176
- def main():
177
- """Main application entry point."""
178
- add_logo()
 
 
 
179
 
180
- st.markdown("""
181
- <div style="text-align: center; margin-bottom: 30px;">
182
- <h1>πŸŽ₯ Video Background Replacer</h1>
183
- <p>Two-Stage Processing: SAM2 + MatAnyone β†’ Transparent β†’ Composite</p>
184
- </div>
185
- """, unsafe_allow_html=True)
186
- st.markdown("---")
187
 
188
- initialize_session_state()
189
- show_memory_info()
 
 
 
 
 
 
190
 
191
- col1, col2 = st.columns([1, 1], gap="large")
 
 
192
 
193
- # LEFT COLUMN: Video Upload & Stage 1
194
- with col1:
195
- st.header("1. Upload Video")
196
- handle_video_upload()
197
- show_video_preview()
 
 
198
 
199
- st.markdown('<div class="stage-indicator">STAGE 1: Create Transparent Video</div>', unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
200
 
201
- stage1_disabled = not st.session_state.uploaded_video or st.session_state.processing_stage1
 
 
 
 
 
 
 
202
 
203
- if st.button("🎭 Create Transparent Video", type="primary", disabled=stage1_disabled,
204
- use_container_width=True, help="Remove background using SAM2 + MatAnyone AI"):
205
- with st.spinner("Stage 1: Creating transparent video..."):
206
- st.session_state.processing_stage1 = True
207
- try:
208
- transparent_path = stage1_create_transparent_video(st.session_state.uploaded_video)
209
- if transparent_path:
210
- st.session_state.transparent_video_path = transparent_path
211
- st.success("βœ… Stage 1 Complete: Transparent video created!")
212
- st.balloons()
213
- else:
214
- st.error("❌ Stage 1 Failed: Could not create transparent video")
215
- except Exception as e:
216
- st.error(f"❌ Stage 1 Error: {str(e)}")
217
- finally:
218
- st.session_state.processing_stage1 = False
219
 
220
- # Show transparent video result
221
- if st.session_state.get('transparent_video_path'):
222
- st.markdown("#### Transparent Video Result")
223
- try:
224
- with open(st.session_state.transparent_video_path, 'rb') as f:
225
- transparent_bytes = f.read()
226
- st.video(transparent_bytes)
227
- st.download_button("πŸ’Ύ Download Transparent Video (.mov)", data=transparent_bytes,
228
- file_name="transparent_video.mov", mime="video/quicktime",
229
- use_container_width=True)
230
- st.caption(f"Size: {len(transparent_bytes) / (1024**2):.1f}MB")
231
- except Exception as e:
232
- st.error(f"Error displaying transparent video: {str(e)}")
233
 
234
- # RIGHT COLUMN: Background Selection & Stage 2
235
- with col2:
236
- st.header("2. Background Settings")
237
- handle_background_selection()
 
238
 
239
- st.markdown('<div class="stage-indicator">STAGE 2: Composite with Background</div>', unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
240
 
241
- stage2_disabled = (not st.session_state.get('transparent_video_path') or
242
- st.session_state.processing_stage2 or
243
- (st.session_state.bg_type == "image" and not st.session_state.get('bg_image')))
244
 
245
- if st.button("🎬 Composite Final Video", type="primary", disabled=stage2_disabled,
246
- use_container_width=True, help="Combine transparent video with selected background"):
247
- if st.session_state.bg_type == "image" and not st.session_state.get('bg_image'):
248
- st.error("Please upload a background image first.")
249
- else:
250
- with st.spinner("Stage 2: Compositing with background..."):
251
- st.session_state.processing_stage2 = True
252
- try:
253
- background = st.session_state.bg_image if st.session_state.bg_type == "image" else st.session_state.bg_color
254
- final_path = stage2_composite_background(st.session_state.transparent_video_path,
255
- background, st.session_state.bg_type)
256
- if final_path:
257
- st.session_state.final_video_path = final_path
258
- st.success("βœ… Stage 2 Complete: Final video ready!")
259
- st.balloons()
260
- else:
261
- st.error("❌ Stage 2 Failed: Could not composite video")
262
- except Exception as e:
263
- st.error(f"❌ Stage 2 Error: {str(e)}")
264
- finally:
265
- st.session_state.processing_stage2 = False
 
 
 
 
 
 
 
 
 
266
 
267
- # Show final video result
268
- if st.session_state.get('final_video_path'):
269
- st.markdown("#### Final Video Result")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
270
  try:
271
- with open(st.session_state.final_video_path, 'rb') as f:
272
- final_bytes = f.read()
273
- st.video(final_bytes)
274
- st.download_button("πŸ’Ύ Download Final Video (.mp4)", data=final_bytes,
275
- file_name="final_video.mp4", mime="video/mp4", use_container_width=True)
276
- st.caption(f"Size: {len(final_bytes) / (1024**2):.1f}MB")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
277
  except Exception as e:
278
- st.error(f"Error displaying final video: {str(e)}")
279
-
280
- # Processing tips
281
- with st.expander("πŸ’‘ Two-Stage Processing Tips"):
282
- st.markdown("""
283
- **Stage 1 - Create Transparent Video:**
284
- - Uses SAM2 + MatAnyone AI to remove background
285
- - Creates a .mov file with alpha channel
286
- - Only needs to be done once per video
287
 
288
- **Stage 2 - Composite Background:**
289
- - Fast compositing with your chosen background
290
- - Try multiple backgrounds without re-processing
291
- - Much faster than Stage 1
292
- """)
 
 
 
293
 
294
  if __name__ == "__main__":
295
  main()
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ import cv2
3
+ import numpy as np
4
+ import torch
5
+ import tempfile
6
+ import os
7
+ import logging
8
  from pathlib import Path
9
+ import time
 
 
 
 
 
 
 
10
 
11
# Configure logging
# Root-logger setup: INFO level, timestamped lines like
# "[14:03:21] INFO: message" — matches the format hand-built in
# log_and_progress() for its stdout echo.
logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s] %(levelname)s: %(message)s',
    datefmt='%H:%M:%S'
)
# Module-level logger used by the whole pipeline below.
logger = logging.getLogger(__name__)
18
 
19
+ # Import model loaders
20
+ from model_loaders import load_sam2, load_matanyone, pose
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
+ st.set_page_config(page_title="Video Matting", layout="wide")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
def log_and_progress(progress_callback, stage, progress, message):
    """Report one progress event to every sink.

    The same line goes to the module logger, to stdout (flushed, so it is
    visible in the hosting console immediately), and finally to the UI via
    ``progress_callback(stage, progress, message)``.
    """
    stamp = time.strftime("%H:%M:%S")
    line = "[{0}] Stage {1} ({2:.0%}): {3}".format(stamp, stage, progress, message)
    logger.info(line)
    print(line, flush=True)
    progress_callback(stage, progress, message)
 
 
 
 
 
 
 
 
 
31
 
32
def process_video(input_path, output_path, progress_callback):
    """Run the two-stage matting pipeline on a video file.

    Stage 1 segments the person in every frame with SAM2, prompted by a
    bounding box derived from MediaPipe pose landmarks. Stage 2 refines each
    mask into an alpha matte with MatAnyone, temporally smooths the mattes,
    and writes the result (person composited over black) to ``output_path``.

    Args:
        input_path: Path to a source video readable by OpenCV.
        output_path: Destination path for the rendered .mp4.
        progress_callback: Callable ``(stage, progress, message)`` with
            stage in {1, 2} and progress in [0.0, 1.0].

    Returns:
        ``output_path`` on success.

    Raises:
        ValueError: If the video cannot be opened or yields no frames.
        Exception: Model-loading and video-writing failures propagate.
    """
    # ============================================================
    # STAGE 1: SEGMENTATION (Load models, read video, segment all frames)
    # ============================================================
    logger.info("=" * 60)
    logger.info("STAGE 1: PERSON SEGMENTATION")
    logger.info("=" * 60)
    stage1_start = time.time()

    # 1.1: Load SAM2
    log_and_progress(progress_callback, 1, 0.0, "Loading SAM2 model...")
    try:
        sam_predictor = load_sam2()
        logger.info("βœ… SAM2 loaded successfully")
    except Exception as e:
        logger.error(f"❌ SAM2 loading failed: {e}")
        raise

    # 1.2: Load video
    log_and_progress(progress_callback, 1, 0.1, "Opening video file...")
    try:
        cap = cv2.VideoCapture(input_path)
        if not cap.isOpened():
            raise ValueError("Failed to open video file")

        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        logger.info(f"πŸ“Ή Video: {width}x{height} @ {fps:.2f}fps, {total_frames} frames")
    except Exception as e:
        logger.error(f"❌ Video opening failed: {e}")
        raise

    # 1.3: Read all frames into memory (frames are BGR, OpenCV convention)
    log_and_progress(progress_callback, 1, 0.15, f"Reading {total_frames} frames...")
    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(frame)
    cap.release()
    logger.info(f"βœ… Read {len(frames)} frames")

    # BUGFIX: guard against an empty/corrupt video — the per-frame progress
    # math and the final per-frame average below all divide by the frame
    # count, which previously raised ZeroDivisionError.
    if not frames:
        raise ValueError("No frames could be read from the video")
    num_frames = len(frames)

    # 1.4: Segment all frames
    log_and_progress(progress_callback, 1, 0.2, "Starting person segmentation...")
    masks = []

    for i, frame in enumerate(frames):
        # Progress from 0.2 to 1.0 during segmentation
        progress = 0.2 + (0.8 * i / num_frames)

        # Report every 10th frame plus the last one (i == 0 is covered by i % 10)
        if i % 10 == 0 or i == num_frames - 1:
            log_and_progress(progress_callback, 1, progress,
                             f"Segmenting frame {i+1}/{num_frames}...")

        # BUGFIX: h/w were previously assigned only inside the landmark
        # branch, so a first frame with no detected person raised NameError
        # in both np.zeros((h, w)) fallback paths. Compute unconditionally.
        h, w = frame.shape[:2]
        try:
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            sam_predictor.set_image(rgb_frame)

            # Detect person using MediaPipe pose
            mp_results = pose.process(rgb_frame)

            if mp_results.pose_landmarks:
                # Bounding box around all landmarks, padded 20px and
                # clamped to the frame.
                landmarks = mp_results.pose_landmarks.landmark
                xs = [lm.x * w for lm in landmarks]
                ys = [lm.y * h for lm in landmarks]
                x1, x2 = max(0, min(xs) - 20), min(w, max(xs) + 20)
                y1, y2 = max(0, min(ys) - 20), min(h, max(ys) + 20)

                input_box = np.array([x1, y1, x2, y2])
                sam_masks, _, _ = sam_predictor.predict(
                    point_coords=None,
                    point_labels=None,
                    box=input_box[None, :],
                    multimask_output=False
                )
                masks.append(sam_masks[0])
            else:
                # No person detected: empty mask for this frame
                masks.append(np.zeros((h, w), dtype=bool))

        except Exception as e:
            # Best-effort: a failed frame gets an empty mask instead of
            # aborting the whole video.
            logger.error(f"❌ Frame {i+1} segmentation failed: {e}")
            masks.append(np.zeros((h, w), dtype=bool))

    stage1_time = time.time() - stage1_start
    logger.info("=" * 60)
    logger.info(f"βœ… STAGE 1 COMPLETE in {stage1_time:.1f}s")
    logger.info(f" Segmented {len(masks)} frames")
    logger.info("=" * 60)

    # ============================================================
    # STAGE 2: MATTING (Refine all masks, smooth, write video)
    # ============================================================
    logger.info("=" * 60)
    logger.info("STAGE 2: HIGH-QUALITY MATTING")
    logger.info("=" * 60)
    stage2_start = time.time()

    # 2.1: Load MatAnyone
    log_and_progress(progress_callback, 2, 0.0, "Loading MatAnyone model...")
    try:
        matanyone = load_matanyone()
        logger.info("βœ… MatAnyone loaded successfully")
    except Exception as e:
        logger.error(f"❌ MatAnyone loading failed: {e}")
        raise

    # 2.2: Process all frames with MatAnyone
    log_and_progress(progress_callback, 2, 0.1, "Refining alpha mattes...")
    alphas = []

    for i, (frame, mask) in enumerate(zip(frames, masks)):
        # Progress from 0.1 to 0.6 during matting
        progress = 0.1 + (0.5 * i / num_frames)

        if i % 10 == 0 or i == num_frames - 1:
            log_and_progress(progress_callback, 2, progress,
                             f"Matting frame {i+1}/{num_frames}...")

        try:
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            if mask.any():
                result = matanyone(rgb_frame, mask)
                alpha = result['alpha']
            else:
                # Empty mask -> fully transparent frame
                alpha = np.zeros((frame.shape[0], frame.shape[1]), dtype=np.float32)

            alphas.append(alpha)

        except Exception as e:
            logger.error(f"❌ Frame {i+1} matting failed: {e}")
            alphas.append(np.zeros((frame.shape[0], frame.shape[1]), dtype=np.float32))

    logger.info(f"βœ… Matted {len(alphas)} frames")

    # 2.3: Temporal smoothing — moving average over a 5-frame window to
    # suppress frame-to-frame alpha jitter. Best effort: on any failure the
    # unsmoothed mattes are used.
    log_and_progress(progress_callback, 2, 0.65, "Applying temporal smoothing to eliminate jitter...")
    try:
        smoothed_alphas = []
        window_size = 5  # 5-frame window (current + 2 before + 2 after)
        half_window = window_size // 2

        for i in range(len(alphas)):
            start_idx = max(0, i - half_window)
            end_idx = min(len(alphas), i + half_window + 1)
            window_alphas = alphas[start_idx:end_idx]

            # Average the alphas in the window
            smoothed_alphas.append(np.mean(window_alphas, axis=0))

        logger.info(f"βœ… Applied {window_size}-frame temporal smoothing")
        alphas = smoothed_alphas

    except Exception as e:
        logger.error(f"⚠️ Smoothing failed: {e}, using unsmoothed alphas")

    # 2.4: Write output video
    # NOTE(review): mp4v/.mp4 carries no alpha channel — "transparent"
    # pixels are rendered as black (frame * alpha), not true transparency.
    log_and_progress(progress_callback, 2, 0.75, "Writing output video...")
    try:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height), True)

        for i, (frame, alpha) in enumerate(zip(frames, alphas)):
            # Progress from 0.75 to 1.0 during video writing
            progress = 0.75 + (0.25 * i / num_frames)

            if i % 30 == 0 or i == num_frames - 1:
                log_and_progress(progress_callback, 2, progress,
                                 f"Writing frame {i+1}/{num_frames}...")

            # Composite the person over black using the alpha matte
            alpha_3ch = np.stack([alpha] * 3, axis=-1)
            output = (frame * alpha_3ch).astype(np.uint8)
            out.write(output)

        out.release()
        logger.info(f"βœ… Video written to {output_path}")

    except Exception as e:
        logger.error(f"❌ Video writing failed: {e}")
        raise

    stage2_time = time.time() - stage2_start
    total_time = stage1_time + stage2_time

    logger.info("=" * 60)
    logger.info(f"βœ… STAGE 2 COMPLETE in {stage2_time:.1f}s")
    logger.info("=" * 60)
    logger.info(f"πŸŽ‰ TOTAL PROCESSING TIME: {total_time:.1f}s")
    logger.info(f" Stage 1 (Segmentation): {stage1_time:.1f}s")
    logger.info(f" Stage 2 (Matting): {stage2_time:.1f}s")
    logger.info(f" Average: {total_time/num_frames:.2f}s per frame")
    logger.info("=" * 60)

    log_and_progress(progress_callback, 2, 1.0, "Processing complete!")
    return output_path
238
+
239
def main():
    """Streamlit entry point: upload a video, run the pipeline, offer the result."""
    st.title("πŸŽ₯ Video Matting with SAM2 + MatAnyone")
    st.write("Upload a video to remove the background from people")

    uploaded_file = st.file_uploader("Choose a video file", type=['mp4', 'avi', 'mov'])

    if uploaded_file:
        # Persist the upload to a real file so OpenCV can read it by path.
        with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmp_input:
            tmp_input.write(uploaded_file.read())
            input_path = tmp_input.name

        # BUGFIX: tempfile.mktemp() is deprecated and race-prone (the name
        # can be claimed between creation and use); mkstemp creates the
        # file atomically. cv2.VideoWriter overwrites it in place.
        out_fd, output_path = tempfile.mkstemp(suffix='_output.mp4')
        os.close(out_fd)

        if st.button("πŸš€ Process Video", type="primary"):
            # Progress tracking — one bar plus one status line per stage
            stage1_progress = st.progress(0, text="Stage 1: Initializing...")
            stage1_status = st.empty()

            stage2_progress = st.progress(0, text="Stage 2: Waiting...")
            stage2_status = st.empty()

            def update_progress(stage, progress, message):
                # Route pipeline progress events to the matching widgets.
                if stage == 1:
                    stage1_progress.progress(progress, text=f"Stage 1: {message}")
                    stage1_status.info(f"πŸ”„ {message}")
                elif stage == 2:
                    stage2_progress.progress(progress, text=f"Stage 2: {message}")
                    stage2_status.info(f"πŸ”„ {message}")

            try:
                logger.info("🎬 Starting video processing...")
                result_path = process_video(input_path, output_path, update_progress)

                stage1_status.success("βœ… Stage 1: Segmentation complete!")
                stage2_status.success("βœ… Stage 2: Matting complete!")

                st.success("πŸŽ‰ Processing complete!")

                # BUGFIX: read the result into memory before the finally
                # block deletes the temp files; previously the download
                # button held an open handle and st.video held a path to a
                # file that was unlinked immediately afterwards.
                with open(result_path, 'rb') as f:
                    result_bytes = f.read()

                # Offer download
                st.download_button(
                    label="πŸ“₯ Download Result",
                    data=result_bytes,
                    file_name="output_matted.mp4",
                    mime="video/mp4"
                )

                # Show result
                st.video(result_bytes)

            except Exception as e:
                logger.exception("Processing failed")
                st.error(f"❌ Processing failed: {str(e)}")
                st.error("Check the logs above for details")

            finally:
                # Cleanup temp files; a narrow except replaces the old bare
                # `except:` and also covers an already-missing path.
                for path in (input_path, output_path):
                    try:
                        os.unlink(path)
                    except OSError:
                        pass

if __name__ == "__main__":
    main()