# NOTE: a git-blame / file-size header table was removed here (extraction artifact, not code).
import cv2
import mediapipe as mp
import numpy as np
import gradio as gr
import os
import time
# Set MPLCONFIGDIR to avoid Matplotlib cache permission issues
# (e.g. when the process runs as a non-root user in a container with a read-only home).
os.environ["MPLCONFIGDIR"] = "/tmp/matplotlib-cache"

# Initialize MediaPipe Selfie Segmentation.
# model_selection=1 selects the landscape model (optimized for 16:9 video frames).
mp_selfie_segmentation = mp.solutions.selfie_segmentation
segmentation = mp_selfie_segmentation.SelfieSegmentation(model_selection=1)

# Global settings — overwritten by process_frame() on every streamed frame,
# so these values only matter until the first frame arrives.
settings = {
    "seg_enabled": True,    # master switch for background replacement
    "blur_bg": False,       # blur the real background instead of replacing it
    "set_bg": False,        # replace background with the uploaded image
    "set_color": False,     # replace background with a solid color
    "bg_color": (0, 0, 0),  # channel order follows the frame; NOTE(review): Gradio numpy frames are RGB, not BGR — confirm
    "blur_intensity": 15    # Gaussian kernel size (must end up odd)
}

# Most recently uploaded custom background image (None until the user uploads one).
bg_image = None
def _parse_color(color_text):
    """Parse an "R,G,B" textbox string into a 3-int tuple; fall back to black.

    The textbox streams while the user types, so malformed or wrong-arity
    values must not crash the frame loop.
    """
    try:
        channels = tuple(int(part) for part in color_text.split(","))
    except (ValueError, AttributeError):
        return (0, 0, 0)
    return channels if len(channels) == 3 else (0, 0, 0)


def process_frame(frame, seg_enabled, blur_bg, set_bg, set_color, bg_color,
                  blur_intensity, custom_image=None):
    """Segment the person in *frame* and composite a new background behind them.

    Background mode priority (first enabled wins): blur > custom image >
    solid color > plain black.

    Args:
        frame: HxWx3 uint8 frame from the Gradio webcam stream (may be None).
        seg_enabled: Master switch for background replacement.
        blur_bg, set_bg, set_color: Background mode checkboxes.
        bg_color: "R,G,B" string from the UI textbox.
        blur_intensity: Requested Gaussian kernel size (coerced to an odd int >= 1).
        custom_image: Optional uploaded background image.

    Returns:
        (output_frame, status) — status is the processing time in ms, or an
        error message when no frame was received.
    """
    global bg_image

    settings.update({
        "seg_enabled": seg_enabled,
        "blur_bg": blur_bg,
        "set_bg": set_bg,
        "set_color": set_color,
        # Defensive parse: a half-typed textbox value must not raise mid-stream.
        "bg_color": _parse_color(bg_color) if set_color and bg_color else (0, 0, 0),
        "blur_intensity": blur_intensity
    })

    if custom_image is not None and set_bg:
        bg_image = custom_image

    if frame is None:
        return None, "No video frame received"

    process_start = time.time()

    # NOTE(review): Gradio numpy images arrive as RGB, so this swap actually
    # hands BGR to MediaPipe (which expects RGB). Mask quality may suffer —
    # confirm against the Gradio Image docs before changing.
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    result = segmentation.process(frame_rgb)
    mask = result.segmentation_mask

    # Binarize the soft mask and replicate it across 3 channels so it can
    # drive np.where() against an HxWx3 frame.
    alpha = (mask > 0.5).astype(np.uint8) * 255
    alpha = cv2.merge([alpha, alpha, alpha])

    output_frame = frame.copy()
    if settings["seg_enabled"]:
        if settings["blur_bg"]:
            # Blur a 10x-downscaled copy and upscale it back: much cheaper
            # than blurring full-resolution and visually equivalent for a
            # heavily blurred background.
            bg = cv2.resize(frame, None, fx=0.1, fy=0.1, interpolation=cv2.INTER_LINEAR)
            # GaussianBlur requires an odd, positive int kernel size; the
            # slider may deliver a float, and unclamped values could go <= 0.
            ksize = max(1, int(settings["blur_intensity"]))
            if ksize % 2 == 0:
                ksize -= 1
            bg = cv2.GaussianBlur(bg, (ksize, ksize), 0)
            bg = cv2.resize(bg, (frame.shape[1], frame.shape[0]), interpolation=cv2.INTER_LINEAR)
            output_frame = np.where(alpha == 255, frame, bg)
        elif settings["set_bg"] and bg_image is not None:
            # Drop a possible alpha channel and resize a LOCAL copy: the
            # original wrote the resized image back into the global, which
            # permanently degraded the upload once frame sizes differed.
            bg = bg_image[:, :, :3] if bg_image.ndim == 3 and bg_image.shape[2] == 4 else bg_image
            if bg.shape[:2] != frame.shape[:2]:
                bg = cv2.resize(bg, (frame.shape[1], frame.shape[0]))
            output_frame = np.where(alpha == 255, frame, bg)
        elif settings["set_color"]:
            bg = np.full_like(frame, settings["bg_color"])
            output_frame = np.where(alpha == 255, frame, bg)
        else:
            # Segmentation on, but no background mode selected: black it out.
            output_frame = np.where(alpha == 255, frame, np.zeros_like(frame))

    process_time = (time.time() - process_start) * 1000
    return output_frame, f"{process_time:.2f} ms"
# Gradio interface: webcam in, processed frame + per-frame timing out.
with gr.Blocks() as demo:
    gr.Markdown("# AI Background Remover")
    with gr.Row():
        with gr.Column():
            webcam = gr.Image(sources=["webcam"], streaming=True, label="Live Video")
            output_image = gr.Image(label="Processed Output")
            seg_enabled = gr.Checkbox(label="Enable Background Removal", value=True)
            blur_bg = gr.Checkbox(label="Blur Background")
            set_bg = gr.Checkbox(label="Custom Image Background")
            custom_image = gr.Image(label="Upload Custom Background", type="numpy")
            set_color = gr.Checkbox(label="Solid Color Background")
            bg_color = gr.Textbox(label="Background Color (R,G,B)", value="0,0,0")
            blur_intensity = gr.Slider(label="Blur Intensity", minimum=5, maximum=25, value=15, step=2)
            processing_time = gr.Textbox(label="Processing Time", value="0 ms")

    # Re-run process_frame for every streamed webcam frame.
    webcam.stream(
        fn=process_frame,
        inputs=[webcam, seg_enabled, blur_bg, set_bg, set_color, bg_color, blur_intensity, custom_image],
        outputs=[output_image, processing_time],
        # Gradio 4 renamed the `_js` kwarg to `js` (and `sources=[...]` above
        # is Gradio 4 API, so `_js` would raise a TypeError here).
        js="""async () => {
            // Ensure webcam permissions are requested
            await navigator.mediaDevices.getUserMedia({ video: true });
            return true;
        }"""
    )

if __name__ == "__main__":
    # Bind to all interfaces so the app is reachable from outside a container.
    demo.launch(server_name="0.0.0.0", server_port=7860)