Sohan2004 committed on
Commit
2e0ce7a
Β·
verified Β·
1 Parent(s): 0c5011a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +145 -15
app.py CHANGED
@@ -1,38 +1,168 @@
1
  import gradio as gr
2
- from stress_detection import detect_stress_from_image # Import your function
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
  # Create Gradio interface
5
- with gr.Blocks(title="Stress Detection System") as demo:
6
  gr.Markdown("# 🧠 Stress Detection System")
7
- gr.Markdown("### AI-Powered Real-Time Stress Analysis")
 
8
 
9
  with gr.Row():
10
  with gr.Column():
11
  image_input = gr.Image(
12
  sources=["webcam"],
13
  type="numpy",
14
- label="πŸ“Ή Webcam"
15
  )
16
- analyze_btn = gr.Button("πŸ” Analyze Stress", variant="primary", size="lg")
17
-
 
 
 
 
 
 
 
 
18
  with gr.Column():
19
  output_label = gr.Label(
20
  num_top_classes=2,
21
- label="πŸ“Š Results"
22
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
- gr.Markdown("### πŸ“ How to use:")
25
- gr.Markdown("1. Click the webcam icon to enable your camera")
26
- gr.Markdown("2. Position your face in the frame")
27
- gr.Markdown("3. Click **Analyze Stress** to get prediction")
28
-
29
- # Connect button to your function
30
  analyze_btn.click(
31
- fn=detect_stress_from_image, # Your function name
32
  inputs=image_input,
33
  outputs=output_label
34
  )
 
 
 
 
 
 
 
35
 
36
  # Launch
37
  if __name__ == "__main__":
38
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import cv2
import mediapipe as mp
import numpy as np
# Explicit imports instead of `from stress_detection import *`: a wildcard
# import hides which names come from the project module, and only the ten
# Action Unit detector classes are used in this file.
from stress_detection import (
    AU01Detector,
    AU04Detector,
    AU06Detector,
    AU07Detector,
    AU12Detector,
    AU14Detector,
    AU17Detector,
    AU23Detector,
    AU24Detector,
    AU26Detector,
)

# Initialize detectors once at import time and reuse them for every request.
au01 = AU01Detector()
au04 = AU04Detector()
au06 = AU06Detector()
au07 = AU07Detector()
au12 = AU12Detector()
au14 = AU14Detector()
au17 = AU17Detector()
au23 = AU23Detector()
au24 = AU24Detector()
au26 = AU26Detector()

# Convenience list of all detectors (kept for API compatibility; the
# visible code below does not iterate over it).
detectors = [au01, au04, au06, au07, au12, au14, au17, au23, au24, au26]

# Initialize MediaPipe Face Mesh once; per MediaPipe docs,
# refine_landmarks=True adds refined eye/iris landmarks.
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5,
    refine_landmarks=True,
)
28
+
29
def detect_stress_from_single_image(image):
    """Classify stress from a single webcam frame.

    Args:
        image: H x W x 3 uint8 numpy array. Gradio's ``gr.Image(type="numpy")``
            delivers frames in RGB channel order — which is exactly what
            MediaPipe Face Mesh expects, so no channel conversion is needed.

    Returns:
        dict: label -> probability mapping consumed by ``gr.Label`` (two
        classes), or a single-entry ``{"Error": ...}`` dict on failure.
    """
    if image is None:
        return {"Error": "No image provided"}

    try:
        frame_height, frame_width = image.shape[:2]

        # BUGFIX: the previous version ran cv2.cvtColor(image, COLOR_BGR2RGB)
        # on an image that is already RGB (Gradio numpy images are RGB, not
        # BGR), which swapped the red and blue channels before face detection.
        # The frame is now passed to MediaPipe unchanged.
        results = face_mesh.process(image)

        if not results.multi_face_landmarks:
            return {"Error": "No face detected in image"}

        landmarks = results.multi_face_landmarks[0].landmark

        # Run all ten Action Unit detectors on the landmark set; each returns
        # an (active, intensity) pair and only the intensity is used here.
        intensity = {}
        for name, detector in (
            ("au01", au01), ("au04", au04), ("au06", au06), ("au07", au07),
            ("au12", au12), ("au14", au14), ("au17", au17), ("au23", au23),
            ("au24", au24), ("au26", au26),
        ):
            _active, intensity[name] = detector.detect(
                landmarks, frame_width, frame_height
            )

        # Mean intensity of the stress-associated AUs.
        stress_aus_intensity = (
            intensity["au01"] + intensity["au04"] + intensity["au07"]
            + intensity["au17"] + intensity["au23"] + intensity["au24"]
        ) / 6

        # Mean intensity of the positive-affect AUs; dampens the stress score.
        # NOTE(review): au26 (Jaw Drop) is detected but excluded from scoring,
        # matching the original behavior — confirm whether it should count.
        positive_aus_intensity = (
            intensity["au06"] + intensity["au12"] + intensity["au14"]
        ) / 3

        # Single-frame score clamped to [0, 100]; intensities are presumably
        # on a 0-100 scale — TODO confirm against the detector classes.
        stress_score = stress_aus_intensity - (positive_aus_intensity * 0.3)
        stress_score = min(100, max(0, stress_score))

        # The original if/else returned the same two key/value pairs in both
        # branches (only dict insertion order differed, which gr.Label ignores
        # since it sorts by confidence), so a single return is equivalent.
        return {
            "Stressed 😰": float(stress_score) / 100,
            "Not Stressed 😊": float(100 - stress_score) / 100,
        }

    except Exception as e:
        # Broad catch is deliberate at this UI boundary: any failure is
        # surfaced to the user as text instead of crashing the Gradio app.
        return {"Error": f"Processing failed: {str(e)}"}
93
+
94
 
95
  # Create Gradio interface
96
+ with gr.Blocks(title="Stress Detection System", theme=gr.themes.Soft()) as demo:
97
  gr.Markdown("# 🧠 Stress Detection System")
98
+ gr.Markdown("### AI-Powered Facial Expression Analysis using 10 Action Units (FACS)")
99
+ gr.Markdown("*Based on Facial Action Coding System - Ekman & Friesen*")
100
 
101
  with gr.Row():
102
  with gr.Column():
103
  image_input = gr.Image(
104
  sources=["webcam"],
105
  type="numpy",
106
+ label="πŸ“Ή Capture Your Face"
107
  )
108
+ analyze_btn = gr.Button("πŸ” Analyze Stress Level", variant="primary", size="lg")
109
+
110
+ gr.Markdown("""
111
+ ### πŸ“ How to use:
112
+ 1. Click the **webcam icon** above
113
+ 2. Allow camera access
114
+ 3. Position your face clearly in frame
115
+ 4. Click **Analyze Stress Level**
116
+ """)
117
+
118
  with gr.Column():
119
  output_label = gr.Label(
120
  num_top_classes=2,
121
+ label="πŸ“Š Stress Analysis Results"
122
  )
123
+
124
+ gr.Markdown("""
125
+ ### πŸ”¬ Action Units Detected:
126
+ **Stress Indicators:**
127
+ - AU01: Inner Brow Raise
128
+ - AU04: Brow Lowerer
129
+ - AU07: Lid Tightener
130
+ - AU17: Chin Raiser
131
+ - AU23: Lip Tightener
132
+ - AU24: Lip Pressor
133
+
134
+ **Positive Indicators:**
135
+ - AU06: Cheek Raiser
136
+ - AU12: Lip Corner Puller
137
+ - AU14: Dimpler
138
+ - AU26: Jaw Drop
139
+ """)
140
 
141
+ # Connect button
 
 
 
 
 
142
  analyze_btn.click(
143
+ fn=detect_stress_from_single_image,
144
  inputs=image_input,
145
  outputs=output_label
146
  )
147
+
148
+ gr.Markdown("""
149
+ ---
150
+ ### ℹ️ About
151
+ This system uses **10 Facial Action Units** based on the Facial Action Coding System (FACS)
152
+ to detect stress through micro-expressions. Developed under the guidance of **Prof. Anup Nandy**.
153
+ """)
154
 
155
  # Launch
156
  if __name__ == "__main__":
157
+ demo.launch()
158
# NOTE(review): the lines below are markdown from a deployment guide that was
# pasted into app.py by mistake; as plain text they are a Python SyntaxError.
# Preserved here as comments — this content belongs in README.md.
#
# ---
#
# ## Final File Structure in Your Space:
#
# your-space/
# ├── app.py               # <- Gradio interface (code above)
# ├── stress_detection.py  # <- Your complete code (paste as-is)
# ├── requirements.txt     # <- 6 packages listed above
# └── README.md            # <- Update with proper metadata