MogensR committed on
Commit
fe45f40
Β·
1 Parent(s): a402fd2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +229 -301
app.py CHANGED
@@ -1,7 +1,8 @@
1
  #!/usr/bin/env python3
2
  """
3
  BackgroundFX - Video Background Replacement with Green Screen Workflow
4
- Hugging Face Space Implementation with SAM2 + MatAnyone
 
5
  """
6
 
7
  import streamlit as st
@@ -13,6 +14,7 @@
13
  import requests
14
  from io import BytesIO
15
  import logging
 
16
 
17
  # Configure logging
18
  logging.basicConfig(level=logging.INFO)
@@ -36,6 +38,41 @@
36
  MATANYONE_AVAILABLE = False
37
  logger.warning(f"⚠️ MatAnyone not available: {e}")
38
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
  def load_background_image(background_url):
40
  """Load background image from URL"""
41
  try:
@@ -45,12 +82,10 @@ def load_background_image(background_url):
45
  return np.array(image.convert('RGB'))
46
  except Exception as e:
47
  logger.error(f"Failed to load background image: {e}")
48
- # Return default brick wall background
49
  return create_default_background()
50
 
51
  def create_default_background():
52
  """Create a default brick wall background"""
53
- # Create a simple brick pattern
54
  height, width = 720, 1280
55
  background = np.ones((height, width, 3), dtype=np.uint8) * 150
56
 
@@ -58,11 +93,9 @@ def create_default_background():
58
  brick_height, brick_width = 40, 80
59
  for y in range(0, height, brick_height):
60
  for x in range(0, width, brick_width):
61
- # Alternate brick offset
62
  offset = brick_width // 2 if (y // brick_height) % 2 else 0
63
  x_pos = (x + offset) % width
64
 
65
- # Draw brick
66
  cv2.rectangle(background,
67
  (x_pos, y),
68
  (min(x_pos + brick_width - 2, width), min(y + brick_height - 2, height)),
@@ -77,18 +110,13 @@ def create_default_background():
77
  def segment_person_sam2(frame):
78
  """Segment person using SAM2"""
79
  try:
80
- # Initialize SAM2 predictor
81
  predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2-hiera-large")
82
-
83
- # Set image
84
  predictor.set_image(frame)
85
 
86
- # Use center point as prompt (assuming person is in center)
87
  h, w = frame.shape[:2]
88
  center_point = np.array([[w//2, h//2]])
89
  center_label = np.array([1])
90
 
91
- # Predict mask
92
  masks, scores, _ = predictor.predict(
93
  point_coords=center_point,
94
  point_labels=center_label,
@@ -104,36 +132,25 @@ def segment_person_sam2(frame):
104
  def segment_person_fallback(frame):
105
  """Fallback person segmentation using color-based method"""
106
  try:
107
- # Convert to HSV for better skin detection
108
  hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
109
 
110
- # Define skin color range
111
  lower_skin = np.array([0, 20, 70])
112
  upper_skin = np.array([20, 255, 255])
113
 
114
- # Create mask for skin tones
115
  skin_mask = cv2.inRange(hsv, lower_skin, upper_skin)
116
 
117
- # Morphological operations to clean up mask
118
  kernel = np.ones((5, 5), np.uint8)
119
  skin_mask = cv2.morphologyEx(skin_mask, cv2.MORPH_CLOSE, kernel)
120
  skin_mask = cv2.morphologyEx(skin_mask, cv2.MORPH_OPEN, kernel)
121
 
122
- # Find largest contour (assumed to be person)
123
  contours, _ = cv2.findContours(skin_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
124
 
125
  if contours:
126
- # Get largest contour
127
  largest_contour = max(contours, key=cv2.contourArea)
128
-
129
- # Create mask from contour
130
  mask = np.zeros(frame.shape[:2], dtype=np.uint8)
131
  cv2.fillPoly(mask, [largest_contour], 255)
132
-
133
- # Expand mask to include more of the person
134
  kernel = np.ones((20, 20), np.uint8)
135
  mask = cv2.dilate(mask, kernel, iterations=2)
136
-
137
  return mask.astype(bool)
138
 
139
  return None
@@ -145,65 +162,43 @@ def segment_person_fallback(frame):
145
  def insert_green_screen(frame, person_mask):
146
  """Insert green screen background while preserving person"""
147
  try:
148
- # Create green background
149
  green_background = np.zeros_like(frame)
150
- green_background[:, :] = [0, 255, 0] # Pure green (RGB)
151
 
152
- # Combine person with green background
153
- # Where mask is True (person), keep original frame
154
- # Where mask is False (background), use green
155
  result = np.where(person_mask[..., None], frame, green_background)
156
-
157
  return result
158
 
159
  except Exception as e:
160
  logger.error(f"Green screen insertion failed: {e}")
161
  return frame
162
 
163
- def chroma_key_replacement(green_screen_frame, new_background):
164
- """Replace green screen with new background using chroma key"""
165
  try:
166
- # Resize background to match frame
167
- h, w = green_screen_frame.shape[:2]
168
- background_resized = cv2.resize(new_background, (w, h))
169
-
170
- # Convert to HSV for better green detection
171
- hsv = cv2.cvtColor(green_screen_frame, cv2.COLOR_RGB2HSV)
172
-
173
- # Define green color range for chroma key
174
- lower_green = np.array([40, 50, 50])
175
- upper_green = np.array([80, 255, 255])
176
-
177
- # Create mask for green pixels
178
- green_mask = cv2.inRange(hsv, lower_green, upper_green)
179
-
180
- # Smooth the mask
181
- kernel = np.ones((3, 3), np.uint8)
182
- green_mask = cv2.morphologyEx(green_mask, cv2.MORPH_CLOSE, kernel)
183
- green_mask = cv2.GaussianBlur(green_mask, (5, 5), 0)
184
 
185
- # Normalize mask to 0-1 range
186
- mask_normalized = green_mask.astype(float) / 255
 
 
187
 
188
- # Apply chroma key replacement
189
- result = green_screen_frame.copy()
190
  for c in range(3):
191
- result[:, :, c] = (green_screen_frame[:, :, c] * (1 - mask_normalized) +
192
- background_resized[:, :, c] * mask_normalized)
193
 
194
  return result.astype(np.uint8)
195
 
196
  except Exception as e:
197
- logger.error(f"Chroma key replacement failed: {e}")
198
- return green_screen_frame
199
 
200
- def process_video_with_green_screen(video_path, background_url, progress_callback=None):
201
- """Process video with proper green screen workflow"""
202
  try:
203
- # Load background image
204
- background_image = load_background_image(background_url)
205
-
206
- # Open video
207
  cap = cv2.VideoCapture(video_path)
208
 
209
  # Get video properties
@@ -212,8 +207,7 @@ def process_video_with_green_screen(video_path, background_url, progress_callbac
212
  height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
213
  total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
214
 
215
- # Create output video writer
216
- output_path = tempfile.mktemp(suffix='.mp4')
217
  fourcc = cv2.VideoWriter_fourcc(*'mp4v')
218
  out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
219
 
@@ -224,296 +218,230 @@ def process_video_with_green_screen(video_path, background_url, progress_callbac
224
  if not ret:
225
  break
226
 
227
- # Convert BGR to RGB
228
  frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
229
 
230
- # Step 1: Segment person
231
  if SAM2_AVAILABLE:
232
  person_mask = segment_person_sam2(frame_rgb)
233
- method_used = "SAM2"
234
  else:
235
  person_mask = segment_person_fallback(frame_rgb)
236
- method_used = "Fallback"
237
 
238
  if person_mask is not None:
239
- # Step 2: Insert green screen
240
- green_screen_frame = insert_green_screen(frame_rgb, person_mask)
241
-
242
- # Step 3: Chroma key replacement
243
- final_frame = chroma_key_replacement(green_screen_frame, background_image)
244
- else:
245
- # If segmentation fails, use original frame
246
- final_frame = frame_rgb
247
- method_used = "No segmentation"
 
 
 
 
 
248
 
249
- # Convert back to BGR for video writer
250
- final_frame_bgr = cv2.cvtColor(final_frame, cv2.COLOR_RGB2BGR)
251
- out.write(final_frame_bgr)
252
 
253
  frame_count += 1
254
-
255
- # Update progress
256
  if progress_callback:
257
- progress = frame_count / total_frames
258
- progress_callback(progress, f"Processing frame {frame_count}/{total_frames} ({method_used})")
259
 
260
- # Release resources
261
  cap.release()
262
  out.release()
263
 
264
- return output_path
265
 
266
  except Exception as e:
267
  logger.error(f"Video processing failed: {e}")
268
- return None
269
-
270
- def process_video_with_green_screen_custom(video_path, background_image, progress_callback=None):
271
- """Process video with custom background image"""
272
- try:
273
- # Open video
274
- cap = cv2.VideoCapture(video_path)
275
-
276
- # Get video properties
277
- fps = int(cap.get(cv2.CAP_PROP_FPS))
278
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
279
- height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
280
- total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
281
-
282
- # Create output video writer
283
- output_path = tempfile.mktemp(suffix='.mp4')
284
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
285
- out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
286
-
287
- frame_count = 0
288
-
289
- while True:
290
- ret, frame = cap.read()
291
- if not ret:
292
- break
293
-
294
- # Convert BGR to RGB
295
- frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
296
-
297
- # Step 1: Segment person
298
- if SAM2_AVAILABLE:
299
- person_mask = segment_person_sam2(frame_rgb)
300
- method_used = "SAM2"
301
- else:
302
- person_mask = segment_person_fallback(frame_rgb)
303
- method_used = "Fallback"
304
-
305
- if person_mask is not None:
306
- # Step 2: Insert green screen
307
- green_screen_frame = insert_green_screen(frame_rgb, person_mask)
308
-
309
- # Step 3: Chroma key replacement with custom background
310
- final_frame = chroma_key_replacement(green_screen_frame, background_image)
311
- else:
312
- # If segmentation fails, use original frame
313
- final_frame = frame_rgb
314
- method_used = "No segmentation"
315
-
316
- # Convert back to BGR for video writer
317
- final_frame_bgr = cv2.cvtColor(final_frame, cv2.COLOR_RGB2BGR)
318
- out.write(final_frame_bgr)
319
-
320
- frame_count += 1
321
-
322
- # Update progress
323
- if progress_callback:
324
- progress = frame_count / total_frames
325
- progress_callback(progress, f"Processing frame {frame_count}/{total_frames} ({method_used})")
326
-
327
- # Release resources
328
- cap.release()
329
- out.release()
330
-
331
- return output_path
332
-
333
- except Exception as e:
334
- logger.error(f"Video processing with custom background failed: {e}")
335
- return None
336
 
 
337
  def main():
338
- """Streamlit main function"""
339
  st.set_page_config(
340
- page_title="BackgroundFX - Video Background Replacement",
341
  page_icon="🎬",
342
  layout="wide"
343
  )
344
 
345
  st.title("🎬 BackgroundFX - Video Background Replacement")
346
- st.markdown("**Professional video background replacement with green screen workflow**")
347
 
348
- # Show available methods
349
- methods = []
350
- if SAM2_AVAILABLE:
351
- methods.append("βœ… SAM2 (AI Segmentation)")
352
- if MATANYONE_AVAILABLE:
353
- methods.append("βœ… MatAnyone (Advanced Processing)")
354
- methods.append("βœ… Fallback Method (Color-based)")
355
 
356
- st.sidebar.markdown("### Available Methods")
357
- for method in methods:
358
- st.sidebar.markdown(method)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
359
 
360
- # File upload
361
  col1, col2 = st.columns(2)
362
 
363
- # Initialize video_path variable
364
- video_path = None
365
-
366
  with col1:
367
- st.markdown("### πŸ“Ή Upload Video")
368
  uploaded_video = st.file_uploader(
369
- "Choose a video file",
370
  type=['mp4', 'avi', 'mov', 'mkv'],
371
- help="Upload the video you want to process"
372
  )
373
 
374
- if uploaded_video:
375
- # Display video info
376
- st.success(f"βœ… Video uploaded: {uploaded_video.name}")
377
-
378
- # Read video data once and store it
379
  video_bytes = uploaded_video.read()
380
 
381
- # Save uploaded video
382
  with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmp_file:
383
  tmp_file.write(video_bytes)
384
- video_path = tmp_file.name
385
 
386
- # Show video preview using the bytes
387
  st.video(video_bytes)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
388
 
389
  with col2:
390
- st.markdown("### πŸ–ΌοΈ Background Image")
391
-
392
- # Background selection method
393
- background_method = st.radio(
394
- "Choose background method:",
395
- ["πŸ“‹ Preset Backgrounds", "πŸ“ Upload Custom Image"],
396
- index=0
397
- )
398
 
399
- background_url = None
400
- custom_background = None
401
-
402
- if background_method == "πŸ“‹ Preset Backgrounds":
403
- # Default background options
404
- background_options = {
405
- "Brick Wall": "https://images.unsplash.com/photo-1558618666-fcd25c85cd64?w=1280&h=720&fit=crop",
406
- "Office": "https://images.unsplash.com/photo-1497366216548-37526070297c?w=1280&h=720&fit=crop",
407
- "Nature": "https://images.unsplash.com/photo-1506905925346-21bda4d32df4?w=1280&h=720&fit=crop",
408
- "City": "https://images.unsplash.com/photo-1449824913935-59a10b8d2000?w=1280&h=720&fit=crop"
409
- }
410
-
411
- selected_background = st.selectbox(
412
- "Choose background",
413
- options=list(background_options.keys()),
414
- index=0
415
- )
416
-
417
- background_url = background_options[selected_background]
418
-
419
- # Show background preview
420
- try:
421
- background_image = load_background_image(background_url)
422
- st.image(background_image, caption=f"Background: {selected_background}", use_column_width=True)
423
- except:
424
- st.error("Failed to load background image")
425
-
426
- else: # Upload Custom Image
427
- uploaded_background = st.file_uploader(
428
- "Upload your background image",
429
- type=['jpg', 'jpeg', 'png', 'bmp'],
430
- help="Upload a custom background image (JPG, PNG, BMP)"
431
- )
432
-
433
- if uploaded_background:
434
- # Load and display custom background
435
  try:
436
- custom_background = np.array(Image.open(uploaded_background).convert('RGB'))
437
- st.image(custom_background, caption="Custom Background", use_column_width=True)
438
- st.success(f"βœ… Custom background uploaded: {uploaded_background.name}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
439
  except Exception as e:
440
- st.error(f"Failed to load custom background: {e}")
441
- custom_background = None
442
- else:
443
- st.info("Please upload a background image")
 
 
 
444
 
445
- # Process button
446
- if uploaded_video and video_path and st.button("🎬 Process Video", type="primary"):
447
-
448
- # Check if background is selected
449
- if background_method == "πŸ“‹ Preset Backgrounds" and not background_url:
450
- st.error("Please select a preset background")
451
- return
452
- elif background_method == "πŸ“ Upload Custom Image" and custom_background is None:
453
- st.error("Please upload a custom background image")
454
- return
455
-
456
- with st.spinner("Processing video with green screen workflow..."):
457
- # Create progress bar
458
- progress_bar = st.progress(0)
459
- status_text = st.empty()
460
-
461
- def update_progress(progress, message):
462
- progress_bar.progress(progress)
463
- status_text.text(message)
464
-
465
- # Process video with appropriate background
466
- if background_method == "πŸ“‹ Preset Backgrounds":
467
- output_path = process_video_with_green_screen(
468
- video_path,
469
- background_url,
470
- progress_callback=update_progress
471
- )
472
- else: # Custom background
473
- output_path = process_video_with_green_screen_custom(
474
- video_path,
475
- custom_background,
476
- progress_callback=update_progress
477
- )
478
-
479
- if output_path and os.path.exists(output_path):
480
- st.success("βœ… Video processing completed!")
481
-
482
- # Display processed video
483
- st.markdown("### πŸŽ‰ Processed Video")
484
-
485
- with open(output_path, 'rb') as video_file:
486
- video_bytes = video_file.read()
487
- st.video(video_bytes)
488
-
489
- # Download button
490
- st.download_button(
491
- label="πŸ“₯ Download Processed Video",
492
- data=video_bytes,
493
- file_name=f"backgroundfx_{uploaded_video.name}",
494
- mime="video/mp4"
495
- )
496
-
497
- # Cleanup
498
- try:
499
- os.unlink(video_path)
500
- os.unlink(output_path)
501
- except:
502
- pass
503
- else:
504
- st.error("❌ Video processing failed. Please try again.")
505
 
506
  # Footer
507
  st.markdown("---")
508
- st.markdown("### πŸ”§ Technical Details")
509
- st.markdown("""
510
- **Green Screen Workflow:**
511
- 1. **Person Segmentation** - AI identifies the person in each frame
512
- 2. **Green Screen Insert** - Replaces background with pure green
513
- 3. **Chroma Key Replacement** - Replaces green with new background
514
-
515
- This ensures clean edges and professional results.
516
- """)
517
 
518
  if __name__ == "__main__":
519
  main()
 
1
  #!/usr/bin/env python3
2
  """
3
  BackgroundFX - Video Background Replacement with Green Screen Workflow
4
+ Fixed for Hugging Face Space - Handles video preview issues
5
+ FIXED: Video display issue by properly handling file stream
6
  """
7
 
8
  import streamlit as st
 
14
  import requests
15
  from io import BytesIO
16
  import logging
17
+ import base64
18
 
19
  # Configure logging
20
  logging.basicConfig(level=logging.INFO)
 
38
  MATANYONE_AVAILABLE = False
39
  logger.warning(f"⚠️ MatAnyone not available: {e}")
40
 
41
def get_video_info(video_path):
    """Return (properties dict, first frame as RGB) for the video at *video_path*.

    The dict carries 'fps', 'width', 'height', 'total_frames' and 'duration'
    (seconds, 0 when fps is unknown).  Returns (None, None) when the file
    cannot be opened, the first frame cannot be read, or OpenCV raises.
    """
    try:
        capture = cv2.VideoCapture(video_path)
        if not capture.isOpened():
            return None, None

        frames_per_second = int(capture.get(cv2.CAP_PROP_FPS))
        frame_width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))

        grabbed, frame_bgr = capture.read()
        capture.release()

        # NOTE(review): like the original, metadata is discarded when the
        # first frame cannot be decoded — both values come back None.
        if not grabbed:
            return None, None

        info = {
            'fps': frames_per_second,
            'width': frame_width,
            'height': frame_height,
            'total_frames': frame_total,
            'duration': frame_total / frames_per_second if frames_per_second > 0 else 0,
        }
        # OpenCV decodes frames as BGR; the rest of the app works in RGB.
        return info, cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)

    except Exception as e:
        logger.error(f"Error getting video info: {e}")
        return None, None
75
+
76
  def load_background_image(background_url):
77
  """Load background image from URL"""
78
  try:
 
82
  return np.array(image.convert('RGB'))
83
  except Exception as e:
84
  logger.error(f"Failed to load background image: {e}")
 
85
  return create_default_background()
86
 
87
  def create_default_background():
88
  """Create a default brick wall background"""
 
89
  height, width = 720, 1280
90
  background = np.ones((height, width, 3), dtype=np.uint8) * 150
91
 
 
93
  brick_height, brick_width = 40, 80
94
  for y in range(0, height, brick_height):
95
  for x in range(0, width, brick_width):
 
96
  offset = brick_width // 2 if (y // brick_height) % 2 else 0
97
  x_pos = (x + offset) % width
98
 
 
99
  cv2.rectangle(background,
100
  (x_pos, y),
101
  (min(x_pos + brick_width - 2, width), min(y + brick_height - 2, height)),
 
110
  def segment_person_sam2(frame):
111
  """Segment person using SAM2"""
112
  try:
 
113
  predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2-hiera-large")
 
 
114
  predictor.set_image(frame)
115
 
 
116
  h, w = frame.shape[:2]
117
  center_point = np.array([[w//2, h//2]])
118
  center_label = np.array([1])
119
 
 
120
  masks, scores, _ = predictor.predict(
121
  point_coords=center_point,
122
  point_labels=center_label,
 
132
  def segment_person_fallback(frame):
133
  """Fallback person segmentation using color-based method"""
134
  try:
 
135
  hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
136
 
 
137
  lower_skin = np.array([0, 20, 70])
138
  upper_skin = np.array([20, 255, 255])
139
 
 
140
  skin_mask = cv2.inRange(hsv, lower_skin, upper_skin)
141
 
 
142
  kernel = np.ones((5, 5), np.uint8)
143
  skin_mask = cv2.morphologyEx(skin_mask, cv2.MORPH_CLOSE, kernel)
144
  skin_mask = cv2.morphologyEx(skin_mask, cv2.MORPH_OPEN, kernel)
145
 
 
146
  contours, _ = cv2.findContours(skin_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
147
 
148
  if contours:
 
149
  largest_contour = max(contours, key=cv2.contourArea)
 
 
150
  mask = np.zeros(frame.shape[:2], dtype=np.uint8)
151
  cv2.fillPoly(mask, [largest_contour], 255)
 
 
152
  kernel = np.ones((20, 20), np.uint8)
153
  mask = cv2.dilate(mask, kernel, iterations=2)
 
154
  return mask.astype(bool)
155
 
156
  return None
 
def insert_green_screen(frame, person_mask):
    """Keep the person, paint everything outside *person_mask* pure green.

    Returns *frame* unchanged if compositing fails for any reason.
    """
    try:
        # Solid-green canvas with the same shape and dtype as the frame.
        green = np.zeros_like(frame)
        green[..., 1] = 255  # (R, G, B) = (0, 255, 0)

        # Person pixels come from the frame, the rest from the green canvas.
        return np.where(person_mask[..., None], frame, green)

    except Exception as e:
        logger.error(f"Green screen insertion failed: {e}")
        return frame
174
 
175
def replace_background(frame, person_mask, background):
    """Composite *frame* over *background*, using *person_mask* as a soft alpha.

    The mask is softened with a normalized 5x5 box filter so edges blend;
    the background is resized to the frame's dimensions first.  Returns the
    frame unchanged if compositing fails.
    """
    try:
        height, width = frame.shape[:2]
        bg = cv2.resize(background, (width, height))

        # Soft alpha matte: float mask smoothed by a 5x5 averaging kernel.
        box_kernel = np.ones((5, 5), np.float32) / 25
        alpha = cv2.filter2D(person_mask.astype(np.float32), -1, box_kernel)

        # Blend channel by channel: person where alpha~1, background where ~0.
        result = np.zeros_like(frame)
        for channel in range(3):
            person_part = alpha * frame[:, :, channel]
            bg_part = (1 - alpha) * bg[:, :, channel]
            result[:, :, channel] = person_part + bg_part

        return result.astype(np.uint8)

    except Exception as e:
        logger.error(f"Background replacement failed: {e}")
        return frame
198
 
199
+ def process_video(video_path, output_path, background_image, use_green_screen=True, progress_callback=None):
200
+ """Process video with background replacement"""
201
  try:
 
 
 
 
202
  cap = cv2.VideoCapture(video_path)
203
 
204
  # Get video properties
 
207
  height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
208
  total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
209
 
210
+ # Setup video writer
 
211
  fourcc = cv2.VideoWriter_fourcc(*'mp4v')
212
  out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
213
 
 
218
  if not ret:
219
  break
220
 
221
+ # Convert BGR to RGB for processing
222
  frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
223
 
224
+ # Segment person
225
  if SAM2_AVAILABLE:
226
  person_mask = segment_person_sam2(frame_rgb)
 
227
  else:
228
  person_mask = segment_person_fallback(frame_rgb)
 
229
 
230
  if person_mask is not None:
231
+ if use_green_screen:
232
+ # First create green screen
233
+ frame_rgb = insert_green_screen(frame_rgb, person_mask)
234
+ # Then replace green with background
235
+ # Create green mask
236
+ lower_green = np.array([0, 200, 0])
237
+ upper_green = np.array([100, 255, 100])
238
+ green_mask = cv2.inRange(frame_rgb, lower_green, upper_green)
239
+ green_mask = green_mask.astype(bool)
240
+ # Replace green areas with background
241
+ frame_rgb = replace_background(frame_rgb, ~green_mask, background_image)
242
+ else:
243
+ # Direct background replacement
244
+ frame_rgb = replace_background(frame_rgb, person_mask, background_image)
245
 
246
+ # Convert back to BGR for writing
247
+ frame_bgr = cv2.cvtColor(frame_rgb, cv2.COLOR_RGB2BGR)
248
+ out.write(frame_bgr)
249
 
250
  frame_count += 1
 
 
251
  if progress_callback:
252
+ progress_callback(frame_count / total_frames)
 
253
 
 
254
  cap.release()
255
  out.release()
256
 
257
+ return True
258
 
259
  except Exception as e:
260
  logger.error(f"Video processing failed: {e}")
261
+ return False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
262
 
263
+ # Streamlit UI
264
  def main():
 
265
  st.set_page_config(
266
+ page_title="BackgroundFX",
267
  page_icon="🎬",
268
  layout="wide"
269
  )
270
 
271
  st.title("🎬 BackgroundFX - Video Background Replacement")
272
+ st.markdown("Replace video backgrounds with AI-powered segmentation")
273
 
274
+ # Check dependencies
275
+ if not SAM2_AVAILABLE:
276
+ st.warning("⚠️ SAM2 not available - using fallback segmentation method")
277
+ if not MATANYONE_AVAILABLE:
278
+ st.info("ℹ️ MatAnyone not available - using standard matting")
 
 
279
 
280
+ # Sidebar for settings
281
+ with st.sidebar:
282
+ st.header("βš™οΈ Settings")
283
+
284
+ use_green_screen = st.checkbox(
285
+ "Use Green Screen Workflow",
286
+ value=True,
287
+ help="First create green screen, then replace with background"
288
+ )
289
+
290
+ st.subheader("πŸ“Έ Background Options")
291
+ bg_option = st.radio(
292
+ "Choose background source:",
293
+ ["Default Brick Wall", "Custom URL", "Upload Image"]
294
+ )
295
+
296
+ background_image = None
297
+
298
+ if bg_option == "Custom URL":
299
+ bg_url = st.text_input(
300
+ "Background Image URL",
301
+ value="https://images.unsplash.com/photo-1557683316-973673baf926",
302
+ help="Enter a direct image URL"
303
+ )
304
+ if bg_url:
305
+ with st.spinner("Loading background..."):
306
+ background_image = load_background_image(bg_url)
307
+ if background_image is not None:
308
+ st.success("βœ… Background loaded")
309
+ st.image(background_image, caption="Background Preview", use_column_width=True)
310
+
311
+ elif bg_option == "Upload Image":
312
+ uploaded_bg = st.file_uploader(
313
+ "Upload Background Image",
314
+ type=['jpg', 'jpeg', 'png'],
315
+ help="Upload your own background image"
316
+ )
317
+ if uploaded_bg is not None:
318
+ background_image = np.array(Image.open(uploaded_bg).convert('RGB'))
319
+ st.success("βœ… Background uploaded")
320
+ st.image(background_image, caption="Background Preview", use_column_width=True)
321
+
322
+ else: # Default Brick Wall
323
+ background_image = create_default_background()
324
+ st.info("Using default brick wall background")
325
+ st.image(background_image, caption="Default Background", use_column_width=True)
326
 
327
+ # Main content area
328
  col1, col2 = st.columns(2)
329
 
 
 
 
330
  with col1:
331
+ st.header("πŸ“Ή Input Video")
332
  uploaded_video = st.file_uploader(
333
+ "Upload your video",
334
  type=['mp4', 'avi', 'mov', 'mkv'],
335
+ help="Upload a video to process"
336
  )
337
 
338
+ if uploaded_video is not None:
339
+ # FIXED: Read bytes once and reuse
 
 
 
340
  video_bytes = uploaded_video.read()
341
 
342
+ # Save to temporary file for processing
343
  with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmp_file:
344
  tmp_file.write(video_bytes)
345
+ temp_video_path = tmp_file.name
346
 
347
+ # Display video using the original bytes (not consuming the stream)
348
  st.video(video_bytes)
349
+
350
+ # Get video info using the temp file
351
+ video_info, first_frame = get_video_info(temp_video_path)
352
+
353
+ if video_info and first_frame is not None:
354
+ st.success(f"βœ… Video loaded: {video_info['width']}x{video_info['height']}, "
355
+ f"{video_info['fps']} fps, {video_info['duration']:.1f}s")
356
+
357
+ # Show first frame as thumbnail
358
+ st.image(first_frame, caption="First Frame Preview", use_column_width=True)
359
+
360
+ # Store paths in session state
361
+ if 'temp_video_path' not in st.session_state:
362
+ st.session_state.temp_video_path = temp_video_path
363
+ else:
364
+ st.error("Failed to read video information")
365
 
366
  with col2:
367
+ st.header("🎯 Output")
 
 
 
 
 
 
 
368
 
369
+ if uploaded_video is not None and background_image is not None:
370
+ if st.button("πŸš€ Process Video", type="primary"):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
371
  try:
372
+ # Create output path
373
+ output_path = tempfile.mktemp(suffix='.mp4')
374
+
375
+ # Process video with progress bar
376
+ progress_bar = st.progress(0)
377
+ status_text = st.empty()
378
+
379
+ def update_progress(progress):
380
+ progress_bar.progress(progress)
381
+ status_text.text(f"Processing: {int(progress * 100)}%")
382
+
383
+ status_text.text("Starting video processing...")
384
+
385
+ # Use the temp file path we saved
386
+ success = process_video(
387
+ st.session_state.temp_video_path,
388
+ output_path,
389
+ background_image,
390
+ use_green_screen,
391
+ update_progress
392
+ )
393
+
394
+ if success and os.path.exists(output_path):
395
+ status_text.text("βœ… Processing complete!")
396
+
397
+ # Read the processed video
398
+ with open(output_path, 'rb') as f:
399
+ processed_video = f.read()
400
+
401
+ # Display processed video
402
+ st.video(processed_video)
403
+
404
+ # Download button
405
+ st.download_button(
406
+ label="πŸ“₯ Download Processed Video",
407
+ data=processed_video,
408
+ file_name="backgroundfx_output.mp4",
409
+ mime="video/mp4"
410
+ )
411
+
412
+ # Cleanup
413
+ os.unlink(output_path)
414
+ else:
415
+ st.error("❌ Video processing failed")
416
+
417
  except Exception as e:
418
+ st.error(f"Error during processing: {str(e)}")
419
+ logger.error(f"Processing error: {e}")
420
+
421
+ elif uploaded_video is None:
422
+ st.info("πŸ‘ˆ Please upload a video to begin")
423
+ elif background_image is None:
424
+ st.info("πŸ‘ˆ Please select or upload a background image")
425
 
426
+ # Cleanup temporary files on session end
427
+ if 'temp_video_path' in st.session_state and os.path.exists(st.session_state.temp_video_path):
428
+ try:
429
+ os.unlink(st.session_state.temp_video_path)
430
+ del st.session_state.temp_video_path
431
+ except:
432
+ pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
433
 
434
  # Footer
435
  st.markdown("---")
436
+ st.markdown(
437
+ """
438
+ <div style='text-align: center'>
439
+ <p>BackgroundFX v1.0 | AI-Powered Video Background Replacement</p>
440
+ <p>Using SAM2 for person segmentation and green screen technology</p>
441
+ </div>
442
+ """,
443
+ unsafe_allow_html=True
444
+ )
445
 
446
  if __name__ == "__main__":
447
  main()