# Real-time FACS-based stress detection — Gradio webcam app (HuggingFace Space).
import base64
from io import BytesIO

import cv2
import gradio as gr
import mediapipe as mp
import numpy as np
from PIL import Image

# Project-local Action Unit detector classes.
from stress_detection import (
    AU01Detector, AU04Detector, AU06Detector, AU07Detector,
    AU12Detector, AU14Detector, AU17Detector, AU23Detector,
    AU24Detector, AU26Detector, mp_face_mesh
)

# One detector per tracked Action Unit, all smoothed over the same window.
# Constructed in a fixed order so the unpacking below stays aligned.
_DETECTOR_CLASSES = (
    AU01Detector, AU04Detector, AU06Detector, AU07Detector, AU12Detector,
    AU14Detector, AU17Detector, AU23Detector, AU24Detector, AU26Detector,
)
detectors = [cls(window_size=10) for cls in _DETECTOR_CLASSES]
au01, au04, au06, au07, au12, au14, au17, au23, au24, au26 = detectors

# MediaPipe face mesh; refine_landmarks=True adds iris/lip refinement.
face_mesh = mp_face_mesh.FaceMesh(
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5,
    refine_landmarks=True
)
def process_frame(frame):
    """
    Process a single frame from the webcam stream.

    Runs all ten AU detectors on the detected face, draws overlay panels
    (left: stress AUs, right: positive AUs, bottom: status bar), and
    computes a simple stress score as the mean of the six stress-AU
    intensities.

    Parameters:
        frame: RGB image array from Gradio's webcam component, or None.

    Returns:
        (output_frame, analysis_text): the annotated RGB frame and a
        human-readable summary. On any exception the untouched input
        frame is returned with an error message so the stream keeps going.
    """
    if frame is None:
        return None, "No frame received"
    try:
        # Gradio delivers RGB; OpenCV drawing expects BGR.
        frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        frame_height, frame_width = frame_bgr.shape[:2]

        # MediaPipe expects RGB and the incoming frame already is RGB, so
        # feed it directly (the previous RGB->BGR->RGB round trip was
        # a redundant pair of conversions).
        results = face_mesh.process(frame)
        if not results.multi_face_landmarks:
            cv2.putText(frame_bgr, "No face detected", (50, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            return cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB), "⚠️ No face detected"

        landmarks = results.multi_face_landmarks[0].landmark

        # Run every detector once; each returns (active, intensity).
        # Unpacking order matches the module-level `detectors` list.
        (
            (au01_active, au01_intensity), (au04_active, au04_intensity),
            (au06_active, au06_intensity), (au07_active, au07_intensity),
            (au12_active, au12_intensity), (au14_active, au14_intensity),
            (au17_active, au17_intensity), (au23_active, au23_intensity),
            (au24_active, au24_intensity), (au26_active, au26_intensity),
        ) = [d.detect(landmarks, frame_width, frame_height) for d in detectors]

        # Semi-transparent overlay panels (left and right info boxes).
        overlay = frame_bgr.copy()
        cv2.rectangle(overlay, (5, 5), (300, 200), (50, 50, 50), -1)
        cv2.rectangle(overlay, (frame_width - 305, 5), (frame_width - 5, 150), (50, 50, 50), -1)
        frame_bgr = cv2.addWeighted(overlay, 0.7, frame_bgr, 0.3, 0)

        # Left panel: stress indicators.
        y_offset = 25
        cv2.putText(frame_bgr, "STRESS INDICATORS:", (10, y_offset),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        y_offset += 25
        stress_aus = [
            (au01_active, au01_intensity, "AU01-BrowRaise"),
            (au04_active, au04_intensity, "AU04-BrowLower"),
            (au07_active, au07_intensity, "AU07-LidTight"),
            (au17_active, au17_intensity, "AU17-ChinRaise"),
            (au23_active, au23_intensity, "AU23-LipTight"),
            (au24_active, au24_intensity, "AU24-LipPress"),
        ]
        stress_count = sum(1 for active, _, _ in stress_aus if active)
        for active, intensity, name in stress_aus:
            color = (0, 0, 255) if active else (150, 150, 150)
            cv2.putText(frame_bgr, f"{name}: {intensity:.0f}%",
                        (15, y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.4, color, 1)
            y_offset += 22

        # Right panel: positive indicators.
        y_offset = 25
        x_right = frame_width - 300
        cv2.putText(frame_bgr, "POSITIVE:", (x_right, y_offset),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        y_offset += 25
        positive_aus = [
            (au06_active, au06_intensity, "AU06-Cheek"),
            (au12_active, au12_intensity, "AU12-Smile"),
            (au14_active, au14_intensity, "AU14-Dimple"),
            (au26_active, au26_intensity, "AU26-Jaw"),
        ]
        positive_count = sum(1 for active, _, _ in positive_aus if active)
        for active, intensity, name in positive_aus:
            color = (0, 255, 0) if active else (150, 150, 150)
            cv2.putText(frame_bgr, f"{name}: {intensity:.0f}%",
                        (x_right, y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.4, color, 1)
            y_offset += 22

        # Bottom status bar.
        cv2.rectangle(frame_bgr, (0, frame_height - 40), (frame_width, frame_height), (40, 40, 40), -1)

        # Simple stress score: mean intensity of the six stress AUs.
        stress_score = (au01_intensity + au04_intensity + au07_intensity +
                        au17_intensity + au23_intensity + au24_intensity) / 6

        # NOTE(review): emoji in the strings below were mojibake in the
        # pasted source; restored to the most plausible originals.
        # (cv2.putText cannot render emoji — it draws '?' — same as before.)
        if stress_score > 50:
            status = "🔴 STRESSED"
            status_color = (0, 0, 255)
        elif stress_score > 25:
            status = "🟡 POSSIBLY STRESSED"
            status_color = (0, 165, 255)
        else:
            status = "🟢 NOT STRESSED"
            status_color = (0, 255, 0)
        cv2.putText(frame_bgr, f"{status} | Score: {stress_score:.1f}/100 | Stress AUs: {stress_count}/6 | Positive: {positive_count}/4",
                    (10, frame_height - 12), cv2.FONT_HERSHEY_SIMPLEX, 0.6, status_color, 2)

        # Convert back to RGB for Gradio.
        output_frame = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)

        # Detailed text report for the side panel.
        analysis_text = f"""
🎯 CURRENT ANALYSIS:
{status}
Stress Score: {stress_score:.1f}/100
📊 STRESS INDICATORS:
- AU01 (Inner Brow): {au01_intensity:.0f}% {'✓ ACTIVE' if au01_active else ''}
- AU04 (Brow Lower): {au04_intensity:.0f}% {'✓ ACTIVE' if au04_active else ''}
- AU07 (Lid Tighten): {au07_intensity:.0f}% {'✓ ACTIVE' if au07_active else ''}
- AU17 (Chin Raise): {au17_intensity:.0f}% {'✓ ACTIVE' if au17_active else ''}
- AU23 (Lip Tighten): {au23_intensity:.0f}% {'✓ ACTIVE' if au23_active else ''}
- AU24 (Lip Press): {au24_intensity:.0f}% {'✓ ACTIVE' if au24_active else ''}
😊 POSITIVE INDICATORS:
- AU06 (Cheek Raise): {au06_intensity:.0f}% {'✓ ACTIVE' if au06_active else ''}
- AU12 (Smile): {au12_intensity:.0f}% {'✓ ACTIVE' if au12_active else ''}
- AU14 (Dimpler): {au14_intensity:.0f}% {'✓ ACTIVE' if au14_active else ''}
- AU26 (Jaw Drop): {au26_intensity:.0f}% {'✓ ACTIVE' if au26_active else ''}
"""
        return output_frame, analysis_text
    except Exception as e:
        # Best-effort boundary: never kill the stream, surface the error text.
        return frame, f"❌ Error: {str(e)}"
# Create Gradio Interface.
# Layout: two columns — live webcam feed (left) and text analysis (right).
with gr.Blocks(title="Real-Time Stress Detection", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# π§ Real-Time 10-AU Stress Detection System")
    gr.Markdown("### Live Facial Expression Analysis - Based on FACS")
    gr.Markdown("*Research Guide: Prof. Anup Nandy*")
    with gr.Row():
        with gr.Column(scale=2):
            # NOTE(review): this component is used as BOTH the stream source
            # and the annotated output below — confirm that writing the
            # processed frame back does not feed it into the next input.
            webcam = gr.Image(
                sources=["webcam"],
                streaming=True,
                label="πΉ Live Webcam Feed"
            )
            gr.Markdown("""
### π Instructions:
1. **Allow camera access** when prompted
2. **Position your face** clearly in the frame
3. **Analysis runs automatically** in real-time
4. Watch the overlay for instant AU detection
""")
        with gr.Column(scale=1):
            # Right-hand panel that receives the analysis text each frame.
            analysis_output = gr.Textbox(
                label="π Real-Time Analysis",
                lines=20,
                value="Waiting for camera..."
            )
            gr.Markdown("""
---
### π¬ Action Units Being Tracked:
**Stress Indicators:** AU01, AU04, AU07, AU17, AU23, AU24
**Positive Indicators:** AU06, AU12, AU14, AU26
The system analyzes your facial expressions in real-time and provides instant feedback
on stress levels based on the Facial Action Coding System (FACS).
""")
    # Wire the streaming webcam into process_frame; the annotated frame goes
    # back to the webcam component and the text to the analysis box.
    # NOTE(review): `stream_every` is only supported on newer Gradio
    # releases — verify against the pinned gradio version.
    webcam.stream(
        fn=process_frame,
        inputs=[webcam],
        outputs=[webcam, analysis_output],
        stream_every=0.1  # Process every 100ms (10 FPS)
    )

# Standard script entry point: launch the local Gradio server.
if __name__ == "__main__":
    demo.launch()