# (removed non-code scrape residue: "Spaces" / "Sleeping" hosting-status text)
# face_blur_app.py
#
# Streamlit app: upload an image (or fetch one by URL), detect faces with
# MTCNN, and Gaussian-blur each detected face region.

from io import BytesIO

import cv2
import numpy as np
import requests
import streamlit as st
from mtcnn import MTCNN
from PIL import Image
def enhance_contrast(img_rgb_uint8):
    """Boost local contrast with CLAHE applied to the lightness channel.

    The RGB uint8 image is converted to LAB, CLAHE (clip limit 2.0,
    8x8 tiles) is applied to the L channel only — leaving the color
    channels untouched — and the result is converted back to RGB.

    Returns a new RGB uint8 array; the input is not modified.
    """
    as_lab = cv2.cvtColor(img_rgb_uint8, cv2.COLOR_RGB2LAB)
    lightness, chroma_a, chroma_b = cv2.split(as_lab)
    equalizer = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    lab_equalized = cv2.merge((equalizer.apply(lightness), chroma_a, chroma_b))
    return cv2.cvtColor(lab_equalized, cv2.COLOR_LAB2RGB)
@st.cache_resource
def _get_detector():
    """Build the MTCNN detector once per process.

    Constructing MTCNN loads model weights, which is expensive;
    Streamlit reruns the whole script on every interaction, so without
    caching a fresh detector would be built on each rerun.
    """
    return MTCNN()


def detect_faces(img_rgb_uint8):
    """Run MTCNN face detection on an RGB uint8 image.

    Returns the detector's list of detection dicts (each containing a
    'box' of (x, y, width, height) among other keys); an empty list
    means no faces were found.
    """
    return _get_detector().detect_faces(img_rgb_uint8)
def blur_faces_on_image(img_rgb_uint8, faces):
    """Return a copy of the image with every detected face Gaussian-blurred.

    ``faces`` is MTCNN output: dicts whose 'box' entry is
    (x, y, width, height). Each box is clipped to the image bounds;
    boxes that fall entirely outside the frame are skipped.
    """
    result = img_rgb_uint8.copy()
    rows, cols = result.shape[0], result.shape[1]
    for detection in faces:
        x, y, w, h = detection['box']
        # Clip the box to the image so slicing never goes out of range.
        top = max(0, y)
        bottom = min(rows, y + h)
        left = max(0, x)
        right = min(cols, x + w)
        if left >= right or top >= bottom:
            continue  # box lies completely outside the frame
        # Kernel scales with face size; GaussianBlur needs an odd size,
        # and anything below 3 is bumped up to 3.
        kernel = min(w, h) // 4 * 2 + 1
        if kernel < 3:
            kernel = 3
        patch = result[top:bottom, left:right]
        if patch.size > 0:
            result[top:bottom, left:right] = cv2.GaussianBlur(
                patch, (kernel, kernel), 30)
    return result
# --- Streamlit UI (the script reruns top to bottom on every interaction) ---
st.set_page_config(layout="wide", page_title="Face Blurring App")
st.title("Face Blurring Application")
st.markdown("""
Upload an image or provide an image URL to detect and blur faces.
The "Original Processed Image" is the version (resized and contrast-enhanced) that the face detector sees.
""")

# Sidebar inputs: file upload takes precedence over the URL field.
uploaded_file = st.sidebar.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
image_url = st.sidebar.text_input("Or enter image URL", placeholder="e.g., https://example.com/image.jpg")
process_button = st.sidebar.button("Blur Faces", use_container_width=True, type="primary")

# Session state keeps results on screen across Streamlit reruns.
if 'original_image_to_display' not in st.session_state:
    st.session_state.original_image_to_display = None
if 'blurred_image_to_display' not in st.session_state:
    st.session_state.blurred_image_to_display = None

col1, col2 = st.columns(2)

if process_button:
    pil_image = None
    error_message = ""
    if uploaded_file is not None:
        try:
            pil_image = Image.open(uploaded_file).convert("RGB")
            st.sidebar.success("Image uploaded successfully!")
        except Exception as e:
            error_message = f"Error opening uploaded file: {e}"
            st.sidebar.error(error_message)
    elif image_url:
        try:
            response = requests.get(image_url, timeout=10)
            response.raise_for_status()
            pil_image = Image.open(BytesIO(response.content)).convert("RGB")
            st.sidebar.success("Image downloaded successfully!")
        except requests.exceptions.RequestException as e:
            error_message = f"Error fetching image from URL: {e}"
            st.sidebar.error(error_message)
        except Exception as e:
            error_message = f"Error opening image from URL: {e}"
            st.sidebar.error(error_message)
    else:
        error_message = "Please upload an image or provide an image URL."
        st.sidebar.warning(error_message)

    if pil_image is not None:
        # Fixed 320x320 detector input. NOTE(review): this ignores the
        # source aspect ratio — confirm that the distortion is acceptable
        # for detection quality.
        img_np_raw = np.array(pil_image)
        img_resized_uint8 = cv2.resize(img_np_raw, (320, 320), interpolation=cv2.INTER_LINEAR)
        img_processed_for_detection_uint8 = enhance_contrast(img_resized_uint8)
        st.session_state.original_image_to_display = img_processed_for_detection_uint8.copy()
        with st.spinner('Detecting faces...'):
            faces = detect_faces(img_processed_for_detection_uint8)
        if not faces:
            st.info("No faces detected in the image.")
            st.session_state.blurred_image_to_display = st.session_state.original_image_to_display.copy()
        else:
            st.success(f"Detected {len(faces)} face(s). Blurring them now...")
            img_blurred_np_uint8 = blur_faces_on_image(img_processed_for_detection_uint8.copy(), faces)
            st.session_state.blurred_image_to_display = img_blurred_np_uint8
    else:
        # Load failed: clear any stale results so the panels don't mislead.
        st.session_state.original_image_to_display = None
        st.session_state.blurred_image_to_display = None

with col1:
    st.subheader("Original Processed Image")
    if st.session_state.original_image_to_display is not None:
        # use_container_width replaces the deprecated use_column_width and
        # matches the sidebar button's keyword above.
        st.image(st.session_state.original_image_to_display, use_container_width=True,
                 caption="Image after resizing and contrast enhancement (input to detector)")
    else:
        st.info("Upload an image or provide a URL and click 'Blur Faces'.")

with col2:
    st.subheader("Blurred Image")
    if st.session_state.blurred_image_to_display is not None:
        st.image(st.session_state.blurred_image_to_display, use_container_width=True,
                 caption="Image with detected faces blurred")
    else:
        st.info("Blurred image will appear here after processing.")

st.sidebar.markdown("---")
st.sidebar.markdown("Built with [Streamlit](https://streamlit.io) and MTCNN.")