Spaces: Build error
added app.py
app.py CHANGED
@@ -1,78 +1,55 @@
 import streamlit as st
 import cv2  # OpenCV for image processing
 import numpy as np
-from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, WebRtcMode
+# NOTE: Use VideoProcessorBase as the primary class, as VideoTransformerBase is deprecated
+from streamlit_webrtc import webrtc_streamer, VideoProcessorBase, WebRtcMode
 
 # --- PLACEHOLDER IMPORTS (UNCOMMENT/ADJUST AS NEEDED) ---
 # NOTE: Make sure these core libraries are in your requirements.txt
 # import deepface
-# from deepface import DeepFace  # Example import if using deepface
 # from src.detect import detect_faces
 # from src.recognize import recognize_face
-# from src.utils import LogManager
-
 
 # --- CONFIGURATION ---
-# NOTE: Adjust these values based on your model/system performance
 RECOGNITION_THRESHOLD = 0.6
 FRAME_SKIP = 3  # Process every 3rd frame for performance
 
 
 # --- VIDEO PROCESSING CLASS ---
-class FaceRecognitionProcessor(VideoTransformerBase):
+# Using VideoProcessorBase to align with current streamlit-webrtc practices
+class FaceRecognitionProcessor(VideoProcessorBase):
     """
     A class that processes video frames in real-time for face recognition.
     """
     def __init__(self):
-        # Initialize
-        # This only runs once per Streamlit session or on initialization.
-        # Example: self.detector = DeepFace.build_model("mtcnn")
+        # Initialize models once here
         self.frame_count = 0
-
-        self.recognition_model = None  # Placeholder
+        # ... your model loading initialization ...
 
-        #
-
-    def transform(self, frame: np.ndarray) -> np.ndarray:
-        # Increment frame count
+    def recv(self, frame):
+        # The frame is now an av.VideoFrame object, convert to numpy array
+        img = frame.to_ndarray(format="bgr24")
+
         self.frame_count += 1
-
-        # Skip frames to reduce CPU load
         if self.frame_count % FRAME_SKIP != 0:
-            return frame
-
-        # Convert frame from BGR (OpenCV default) to RGB
-        img = frame.copy()
+            return frame  # Return the original frame if skipping
 
-        #
+        # --- Your Face Detection and Recognition Logic Goes Here ---
+        # Example placeholder logic:
         h, w, _ = img.shape
-        # Example: faces = detect_faces(img, self.detection_model)
         faces = [(w//4, h//4, w//2, h//2)]  # Placeholder bounding box
 
         for (x, y, w, h) in faces:
-
-            # Example: recognized_name, score = recognize_face(img, x, y, w, h, self.recognition_model)
-            recognized_name = "Unknown"  # Placeholder result
+            recognized_name = "Unknown"
             score = 0.0
-
-
-            if recognized_name != "Unknown" and score >= RECOGNITION_THRESHOLD:
-                color = (0, 255, 0)  # Green for known user
-                # self.log_manager.log_access(recognized_name)
-            else:
-                color = (0, 0, 255)  # Red for unknown user
-                recognized_name = "Unknown"
-
-            # Draw bounding box
+            color = (0, 0, 255)
+
             cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
-
-            # Draw label
             label = f"{recognized_name}: {score:.2f}"
-            cv2.putText(img, label, (x, y - 10),
-                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 2)
+            cv2.putText(img, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 2)
 
-        return img
+        # Convert back to av.VideoFrame before returning
+        return av.VideoFrame.from_ndarray(img, format="bgr24")
 
 # --- STREAMLIT UI ---
 
@@ -82,18 +59,22 @@ def main():
     st.title("Smart Office Face Recognition System 📸")
     st.sidebar.title("Configuration")
 
-    # Sidebar control for threshold
     RECOGNITION_THRESHOLD = st.sidebar.slider(
         "Recognition Threshold", min_value=0.0, max_value=1.0, value=0.6, step=0.05
     )
 
-    #
-
+    # Use a stable key
+    STREAMER_KEY = "face-recognition-stream-final"
+
+    # --- CRITICAL FIX: The webrtc_streamer call itself must be outside the wrapper ---
+    # The fix is to ensure the component is always called/rendered, but the initialization
+    # of heavy resources (models) is safely handled inside a cached function (if needed).
+
     webrtc_streamer(
-        key=
+        key=STREAMER_KEY,
         mode=WebRtcMode.SENDRECV,
 
-        # ---
+        # --- FIX 2: Enhanced STUN/TURN configuration to resolve aioice errors ---
        rtc_configuration={
             "iceServers": [
                 {"urls": ["stun:stun.l.google.com:19302"]},
@@ -101,17 +82,15 @@ def main():
             ]
         },
 
-        #
-        video_processor_factory=
-        async_processing=True
+        # NOTE: Using video_processor_factory and the VideoProcessorBase class
+        video_processor_factory=FaceRecognitionProcessor,
+        async_processing=True
     )
 
     st.markdown("---")
-
-    # Placeholder for displaying logs
-    # if st.button("Refresh Log"):
-    #     st.dataframe(LogManager().get_logs())
+    # ... (rest of main)
 
 # --- EXECUTION ---
 if __name__ == "__main__":
+    # You MUST also install the package 'av' (pip install av) to use VideoProcessorBase
     main()
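Note: the new version's recv method returns av.VideoFrame.from_ndarray(...), but no import of av appears anywhere in the diff, so the first processed frame would raise a NameError even after the package is installed. A minimal sketch of the import block the new file appears to need, assuming the av package is present:

import av
import cv2
import numpy as np
import streamlit as st
from streamlit_webrtc import webrtc_streamer, VideoProcessorBase, WebRtcMode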
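The diff's own notes say the core libraries must be listed in requirements.txt and that 'av' must be installed, which is the usual cause of a Spaces build error like this one. A sketch of a matching requirements.txt; the choice of opencv-python-headless over opencv-python is an assumption, common for headless server deployments such as Spaces:

streamlit
streamlit-webrtc
av
opencv-python-headless
numpy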
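The "CRITICAL FIX" comment mentions handling heavy resource initialization inside a cached function. A minimal sketch of that idea using Streamlit's st.cache_resource; the loader name and body here are placeholders, not committed code:

@st.cache_resource
def load_recognition_model():
    # Placeholder: build and return your detector/recognizer once per process,
    # e.g. from deepface import DeepFace; return DeepFace.build_model("Facenet")
    return None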
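As committed, the slider assignment RECOGNITION_THRESHOLD = st.sidebar.slider(...) inside main() creates a local name and never updates the module-level constant that the processor reads. One common streamlit-webrtc pattern is to push the value into the live processor through the context object that webrtc_streamer returns; a sketch, where the threshold attribute is hypothetical and would need to be defined on FaceRecognitionProcessor:

ctx = webrtc_streamer(
    key=STREAMER_KEY,
    mode=WebRtcMode.SENDRECV,
    video_processor_factory=FaceRecognitionProcessor,
    async_processing=True,
)
threshold = st.sidebar.slider("Recognition Threshold", 0.0, 1.0, 0.6, 0.05)
if ctx.video_processor:
    ctx.video_processor.threshold = threshold  # hypothetical attribute on the processor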
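"FIX 2" points at STUN/TURN configuration as the remedy for aioice connection errors, yet the committed config lists only Google's public STUN server. When clients sit behind restrictive NATs, a TURN entry is typically added alongside it. A sketch using the standard RTCIceServer fields, where the TURN host and credentials are placeholders only:

RTC_CONFIGURATION = {
    "iceServers": [
        {"urls": ["stun:stun.l.google.com:19302"]},
        {
            "urls": ["turn:turn.example.com:3478"],  # placeholder TURN host
            "username": "user",                      # placeholder credential
            "credential": "pass",
        },
    ]
}
# passed to the component as rtc_configuration=RTC_CONFIGURATION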