treamyracle committed on
Commit
ddb8c75
·
verified ·
1 Parent(s): 07cd1f0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +251 -251
app.py CHANGED
@@ -1,252 +1,252 @@
1
- import gradio as gr
2
- import cv2
3
- import numpy as np
4
- from ultralytics import YOLO
5
- from collections import deque
6
- import math
7
-
8
- # --- CONFIGURATION ---
9
- # Pastikan file model sudah diupload di satu folder yang sama dengan app.py
10
- MODEL_DET_PATH = "best.pt"
11
- MODEL_POSE_PATH = "yolo11x-pose.pt" # Pertimbangkan ganti ke yolo11n-pose.pt jika lag
12
-
13
- # --- LOGIC CONFIGURATION ---
14
- CONF_THRESHOLD = 0.4
15
- HEAD_DROP_THRESHOLD = 110
16
- MOVEMENT_THRESHOLD = 50.0
17
- WRIST_CONF_THRESHOLD = 0.3
18
- EXTENSION_RATIO = 0.6
19
- PALM_BEND_ANGLE = 0
20
- WINDOW_SIZE = 10
21
- WINDOW_THRESHOLD = 6
22
- HAND_STREAK_REQUIRED = 2
23
-
24
- # Output Size untuk Web
25
- OUTPUT_WIDTH = 640
26
- OUTPUT_HEIGHT = 480
27
- OUTPUT_SIZE = (OUTPUT_WIDTH, OUTPUT_HEIGHT)
28
-
29
- # --- LOAD MODELS ---
30
- print("Loading Models...")
31
- try:
32
- model_det = YOLO(MODEL_DET_PATH)
33
- model_pose = YOLO(MODEL_POSE_PATH)
34
- print("✅ Models Loaded Successfully!")
35
- except Exception as e:
36
- print(f"❌ Error loading model: {e}")
37
- # Kita tidak exit() agar Space tidak crash, tapi akan error saat run
38
- pass
39
-
40
- # --- HELPERS (Sama seperti kode Anda) ---
41
- def is_point_in_box(point, box):
42
- if box is None: return False
43
- px, py = point
44
- x1, y1, x2, y2 = box
45
- return x1 < px < x2 and y1 < py < y2
46
-
47
- def rotate_vector(vx, vy, angle_degrees):
48
- rad = math.radians(angle_degrees)
49
- cos_a = math.cos(rad)
50
- sin_a = math.sin(rad)
51
- new_vx = vx * cos_a - vy * sin_a
52
- new_vy = vx * sin_a + vy * cos_a
53
- return new_vx, new_vy
54
-
55
- # --- LOGIC CLASS ---
56
- class DrowsinessDetector:
57
- def __init__(self):
58
- self.history = deque(maxlen=WINDOW_SIZE)
59
- self.l_wrist_hist = deque(maxlen=WINDOW_SIZE)
60
- self.r_wrist_hist = deque(maxlen=WINDOW_SIZE)
61
- self.pose_hand_streak = 0
62
- self.det_hand_streak = 0
63
-
64
- def analyze_activity_level(self, wrist_history):
65
- if len(wrist_history) < 5: return False
66
- points = np.array([p for p in wrist_history if p is not None])
67
- if len(points) < 5: return False
68
- std_x = np.std(points[:, 0])
69
- std_y = np.std(points[:, 1])
70
- return ((std_x + std_y) / 2) > MOVEMENT_THRESHOLD
71
-
72
- def get_pose_features(self, frame):
73
- # ... (Logika pose Anda, disederhanakan untuk Gradio) ...
74
- # Catatan: Karena kompleksitas kode asli, saya menyalin inti logikanya
75
- # namun membuang bagian force_det untuk efisiensi di web.
76
-
77
- annotated_frame = frame.copy()
78
- is_head_drop = False
79
- is_hand_near_mouth = False
80
- l_wrist_pt = None; r_wrist_pt = None; face_box = None
81
- l_wrist_conf = None; r_wrist_conf = None
82
-
83
- H, W = frame.shape[:2]
84
-
85
- try:
86
- res = model_pose.predict(frame, conf=0.5, verbose=False)[0]
87
- # annotated_frame = res.plot() # Opsional: plot bawaan yolo
88
-
89
- # Ambil orang pertama saja
90
- if hasattr(res.keypoints, "data") and len(res.keypoints.data) > 0:
91
- kpts = res.keypoints.data[0].cpu().numpy()
92
-
93
- # Index keypoints COCO standard
94
- # 0:Nose, 5:L-Shoulder, 6:R-Shoulder, 7:L-Elbow, 8:R-Elbow, 9:L-Wrist, 10:R-Wrist
95
- nose = kpts[0][:2]
96
- l_shoulder = kpts[5][:2]; r_shoulder = kpts[6][:2]
97
- l_wrist = kpts[9][:2]; r_wrist = kpts[10][:2]
98
- l_elbow = kpts[7][:2]; r_elbow = kpts[8][:2]
99
-
100
- l_wrist_conf = float(kpts[9][2]); r_wrist_conf = float(kpts[10][2])
101
-
102
- # 1. Logic Perpanjangan Tangan (Sama seperti kode Anda)
103
- if l_wrist[0] != 0 and l_elbow[0] != 0:
104
- vx = l_wrist[0] - l_elbow[0]; vy = l_wrist[1] - l_elbow[1]
105
- rvx, rvy = rotate_vector(vx, vy, PALM_BEND_ANGLE)
106
- l_wrist[0] += rvx * EXTENSION_RATIO; l_wrist[1] += rvy * EXTENSION_RATIO
107
-
108
- if r_wrist[0] != 0 and r_elbow[0] != 0:
109
- vx = r_wrist[0] - r_elbow[0]; vy = r_wrist[1] - r_elbow[1]
110
- rvx, rvy = rotate_vector(vx, vy, -PALM_BEND_ANGLE)
111
- r_wrist[0] += rvx * EXTENSION_RATIO; r_wrist[1] += rvy * EXTENSION_RATIO
112
-
113
- l_wrist_pt = (float(l_wrist[0]), float(l_wrist[1]))
114
- r_wrist_pt = (float(r_wrist[0]), float(r_wrist[1]))
115
-
116
- # 2. Head Drop Logic
117
- if nose[0] != 0 and (l_shoulder[0] != 0 or r_shoulder[0] != 0):
118
- sy = (l_shoulder[1] + r_shoulder[1]) / 2 if (l_shoulder[0]*r_shoulder[0])!=0 else max(l_shoulder[1], r_shoulder[1])
119
- dist = sy - nose[1]
120
- cv2.putText(annotated_frame, f"Head Dist: {int(dist)}", (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,0), 1)
121
- if dist < HEAD_DROP_THRESHOLD: is_head_drop = True
122
-
123
- # 3. Face Box Est (Simple from keypoints)
124
- # Ambil min/max dari mata, hidung, telinga untuk kotak wajah
125
- face_pts = kpts[0:5, :2] # nose, eyes, ears
126
- valid_pts = face_pts[face_pts[:,0] > 0]
127
- if len(valid_pts) > 0:
128
- x1, y1 = valid_pts.min(axis=0); x2, y2 = valid_pts.max(axis=0)
129
- pad = 20
130
- face_box = [max(0, x1-pad), max(0, y1-pad), min(W, x2+pad), min(H, y2+pad + 50)] # +50 ke bawah dagu
131
- cv2.rectangle(annotated_frame, (int(face_box[0]), int(face_box[1])), (int(face_box[2]), int(face_box[3])), (255,255,0), 2)
132
-
133
- # 4. Wrist near Mouth
134
- if face_box:
135
- if l_wrist_conf > WRIST_CONF_THRESHOLD and is_point_in_box(l_wrist_pt, face_box): is_hand_near_mouth = True
136
- if r_wrist_conf > WRIST_CONF_THRESHOLD and is_point_in_box(r_wrist_pt, face_box): is_hand_near_mouth = True
137
-
138
- except Exception as e:
139
- print(f"Pose Error: {e}")
140
- pass
141
-
142
- return is_head_drop, is_hand_near_mouth, annotated_frame, l_wrist_pt, r_wrist_pt, face_box, (l_wrist_conf, r_wrist_conf)
143
-
144
- def process_frame(self, frame):
145
- # Gradio pass RGB, opencv needs BGR
146
- frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
147
- H, W = frame.shape[:2]
148
-
149
- # 1. Detection
150
- res_det = model_det.predict(frame, conf=CONF_THRESHOLD, verbose=False)[0]
151
-
152
- # 2. Pose
153
- is_head_drop, is_hand_in_box, pose_frame, lw, rw, face_box, wrist_confs = self.get_pose_features(frame)
154
-
155
- # 3. Det Logic (Simple Overlap check)
156
- det_hand_mouth_near = False
157
- try:
158
- for box, cls in zip(res_det.boxes.xyxy, res_det.boxes.cls):
159
- if model_det.names[int(cls)] == "Hand_Mouth" and face_box:
160
- # Simple check: box center in face box
161
- bx1, by1, bx2, by2 = box.cpu().numpy()
162
- cx, cy = (bx1+bx2)/2, (by1+by2)/2
163
- if is_point_in_box((cx, cy), face_box):
164
- det_hand_mouth_near = True
165
- except: pass
166
-
167
- # 4. Streak
168
- if is_hand_in_box: self.pose_hand_streak += 1
169
- else: self.pose_hand_streak = 0
170
-
171
- if det_hand_mouth_near: self.det_hand_streak += 1
172
- else: self.det_hand_streak = 0
173
-
174
- flag_hand_mouth = (self.pose_hand_streak >= HAND_STREAK_REQUIRED) or (self.det_hand_streak >= HAND_STREAK_REQUIRED)
175
-
176
- # 5. Activity
177
- self.l_wrist_hist.append(lw if (wrist_confs[0] or 0) >= WRIST_CONF_THRESHOLD else None)
178
- self.r_wrist_hist.append(rw if (wrist_confs[1] or 0) >= WRIST_CONF_THRESHOLD else None)
179
-
180
- is_active = self.analyze_activity_level(self.l_wrist_hist) or self.analyze_activity_level(self.r_wrist_hist)
181
-
182
- # 6. Combined Logic (Simplified)
183
- labels_set = set([model_det.names[int(c)] for c in res_det.boxes.cls.cpu().numpy()]) if hasattr(res_det.boxes, 'cls') else set()
184
-
185
- # Instant Drowsy Check
186
- is_drowsy_frame = False
187
- if not flag_hand_mouth:
188
- if "Closed_Eyes" in labels_set and "Open_Mouth" in labels_set: is_drowsy_frame = True
189
- elif "Closed_Eyes" in labels_set and "Closed_Mouth" in labels_set: is_drowsy_frame = True
190
- elif is_head_drop: is_drowsy_frame = True
191
-
192
- # 7. Sliding Window Logic
193
- if is_active or flag_hand_mouth:
194
- self.history.append(0)
195
- else:
196
- self.history.append(1 if is_drowsy_frame else 0)
197
-
198
- drowsy_score = sum(self.history)
199
- is_drowsy_window = (drowsy_score >= WINDOW_THRESHOLD)
200
-
201
- # 8. Visualization
202
- status_text = "AMAN"
203
- status_color = (0, 255, 0)
204
-
205
- if flag_hand_mouth:
206
- status_text = "BAHAYA! (Hand on Face)"
207
- status_color = (0, 0, 255)
208
- elif is_drowsy_window:
209
- status_text = "BAHAYA! (Mengantuk)"
210
- status_color = (0, 0, 255)
211
- elif is_active:
212
- status_text = "AMAN (Aktif)"
213
- status_color = (0, 255, 0)
214
-
215
- # Draw HUD
216
- annotated = res_det.plot() # Gambar kotak deteksi
217
-
218
- # Overlay Pose Lines
219
- if face_box:
220
- cv2.rectangle(annotated, (int(face_box[0]), int(face_box[1])), (int(face_box[2]), int(face_box[3])), (255,255,0), 2)
221
-
222
- if lw: cv2.circle(annotated, (int(lw[0]), int(lw[1])), 5, (0,255,255), -1)
223
- if rw: cv2.circle(annotated, (int(rw[0]), int(rw[1])), 5, (0,255,255), -1)
224
-
225
- # Top Bar
226
- cv2.rectangle(annotated, (0, 0), (annotated.shape[1], 80), (0, 0, 0), -1)
227
- cv2.putText(annotated, f"STATUS: {status_text}", (20, 35), 0, 0.8, status_color, 2)
228
- cv2.putText(annotated, f"Score: {drowsy_score}/{WINDOW_SIZE} | Act: {is_active}", (20, 65), 0, 0.6, (200, 200, 200), 1)
229
-
230
- return cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
231
-
232
- # --- GRADIO INTERFACE ---
233
- detector = DrowsinessDetector()
234
-
235
- def inference(frame):
236
- if frame is None: return None
237
- return detector.process_frame(frame)
238
-
239
- if __name__ == "__main__":
240
- with gr.Blocks() as demo:
241
- gr.Markdown("# 😴 AI Drowsiness Detector (YOLOv11)")
242
- gr.Markdown("Pastikan webcam aktif. Deteksi mengantuk berbasis Pose & Object Detection.")
243
-
244
- with gr.Row():
245
- # Input webcam, streaming=True agar real-time
246
- input_video = gr.Image(source="webcam", streaming=True, label="Webcam Input", tool=None)
247
- output_video = gr.Image(label="AI Output")
248
-
249
- # Saat webcam berubah (tiap frame), panggil fungsi inference
250
- input_video.change(fn=inference, inputs=input_video, outputs=output_video)
251
-
252
  demo.launch()
 
1
+ import gradio as gr
2
+ import cv2
3
+ import numpy as np
4
+ from ultralytics import YOLO
5
+ from collections import deque
6
+ import math
7
+
8
# --- CONFIGURATION ---
# Make sure the model files are uploaded in the same folder as app.py.
MODEL_DET_PATH = "best.pt"
MODEL_POSE_PATH = "yolo11x-pose.pt" # Consider switching to yolo11n-pose.pt if it lags

# --- LOGIC CONFIGURATION ---
CONF_THRESHOLD = 0.4          # min confidence for the object-detection model
HEAD_DROP_THRESHOLD = 110     # shoulder-to-nose vertical distance (px) below which the head counts as "dropped"
MOVEMENT_THRESHOLD = 50.0     # mean wrist std-dev (px) above which the person counts as "active"
WRIST_CONF_THRESHOLD = 0.3    # min keypoint confidence to trust a wrist position
EXTENSION_RATIO = 0.6         # fraction of the elbow->wrist vector used to extend the wrist toward the palm
PALM_BEND_ANGLE = 0           # degrees to rotate that extension vector (0 = straight along the forearm)
WINDOW_SIZE = 10              # sliding-window length in frames
WINDOW_THRESHOLD = 6          # drowsy frames within the window needed to raise the alarm
HAND_STREAK_REQUIRED = 2      # consecutive hand-near-mouth frames needed to set the flag

# Output size for the web UI
OUTPUT_WIDTH = 640
OUTPUT_HEIGHT = 480
OUTPUT_SIZE = (OUTPUT_WIDTH, OUTPUT_HEIGHT)
28
+
29
# --- LOAD MODELS ---
print("Loading Models...")
# Pre-bind to None so a failed load produces a clear AttributeError on a
# None model at inference time instead of a NameError (the originals were
# simply left undefined when YOLO(...) raised).
model_det = None
model_pose = None
try:
    model_det = YOLO(MODEL_DET_PATH)
    model_pose = YOLO(MODEL_POSE_PATH)
    print("✅ Models Loaded Successfully!")
except Exception as e:
    # Deliberately not exit() so the Space does not crash at startup;
    # inference will fail later instead.
    print(f"❌ Error loading model: {e}")
39
+
40
+ # --- HELPERS (Sama seperti kode Anda) ---
41
def is_point_in_box(point, box):
    """Return True when *point* (x, y) lies strictly inside *box* (x1, y1, x2, y2).

    A ``None`` box never contains anything.
    """
    if box is None:
        return False
    x, y = point
    left, top, right, bottom = box
    return (left < x < right) and (top < y < bottom)
46
+
47
def rotate_vector(vx, vy, angle_degrees):
    """Rotate the 2-D vector (vx, vy) by *angle_degrees* using the standard
    rotation matrix and return the rotated (x, y) pair."""
    theta = math.radians(angle_degrees)
    c, s = math.cos(theta), math.sin(theta)
    return vx * c - vy * s, vx * s + vy * c
54
+
55
+ # --- LOGIC CLASS ---
56
# --- LOGIC CLASS ---
class DrowsinessDetector:
    """Stateful per-frame drowsiness analyzer.

    Combines an object-detection model (eye/mouth states, Hand_Mouth boxes)
    with a pose model (head drop, wrist positions) and smooths the verdict
    over a sliding window of frames.
    """

    def __init__(self):
        # Sliding window of per-frame drowsy flags (1 = drowsy frame).
        self.history = deque(maxlen=WINDOW_SIZE)
        # Recent wrist positions (or None when low confidence) for activity estimation.
        self.l_wrist_hist = deque(maxlen=WINDOW_SIZE)
        self.r_wrist_hist = deque(maxlen=WINDOW_SIZE)
        # Consecutive-frame counters for the two hand-near-mouth signals.
        self.pose_hand_streak = 0
        self.det_hand_streak = 0

    def analyze_activity_level(self, wrist_history):
        """Return True when recent valid wrist positions scatter more than
        MOVEMENT_THRESHOLD pixels on average (i.e. the person is moving)."""
        if len(wrist_history) < 5: return False
        points = np.array([p for p in wrist_history if p is not None])
        if len(points) < 5: return False
        std_x = np.std(points[:, 0])
        std_y = np.std(points[:, 1])
        return ((std_x + std_y) / 2) > MOVEMENT_THRESHOLD

    def get_pose_features(self, frame):
        """Run the pose model on *frame* and extract drowsiness-related features.

        Returns a 7-tuple:
        (is_head_drop, is_hand_near_mouth, annotated_frame,
         l_wrist_pt, r_wrist_pt, face_box, (l_wrist_conf, r_wrist_conf))
        where points/box/confs are None when not detected.
        """
        # (Original pose logic, simplified for Gradio.)
        # Note: the core logic of the original code is copied here, but the
        # force_det part was dropped for efficiency on the web.

        annotated_frame = frame.copy()
        is_head_drop = False
        is_hand_near_mouth = False
        l_wrist_pt = None; r_wrist_pt = None; face_box = None
        l_wrist_conf = None; r_wrist_conf = None

        H, W = frame.shape[:2]

        try:
            res = model_pose.predict(frame, conf=0.5, verbose=False)[0]
            # annotated_frame = res.plot() # Optional: YOLO's built-in plot

            # Take the first detected person only.
            if hasattr(res.keypoints, "data") and len(res.keypoints.data) > 0:
                kpts = res.keypoints.data[0].cpu().numpy()

                # COCO-standard keypoint indices:
                # 0:Nose, 5:L-Shoulder, 6:R-Shoulder, 7:L-Elbow, 8:R-Elbow, 9:L-Wrist, 10:R-Wrist
                nose = kpts[0][:2]
                l_shoulder = kpts[5][:2]; r_shoulder = kpts[6][:2]
                l_wrist = kpts[9][:2]; r_wrist = kpts[10][:2]
                l_elbow = kpts[7][:2]; r_elbow = kpts[8][:2]

                l_wrist_conf = float(kpts[9][2]); r_wrist_conf = float(kpts[10][2])

                # 1. Hand-extension logic: push the wrist point outward along the
                #    (optionally rotated) elbow->wrist vector to approximate the palm.
                #    A coordinate of 0 means the keypoint was not detected.
                if l_wrist[0] != 0 and l_elbow[0] != 0:
                    vx = l_wrist[0] - l_elbow[0]; vy = l_wrist[1] - l_elbow[1]
                    rvx, rvy = rotate_vector(vx, vy, PALM_BEND_ANGLE)
                    l_wrist[0] += rvx * EXTENSION_RATIO; l_wrist[1] += rvy * EXTENSION_RATIO

                if r_wrist[0] != 0 and r_elbow[0] != 0:
                    vx = r_wrist[0] - r_elbow[0]; vy = r_wrist[1] - r_elbow[1]
                    rvx, rvy = rotate_vector(vx, vy, -PALM_BEND_ANGLE)
                    r_wrist[0] += rvx * EXTENSION_RATIO; r_wrist[1] += rvy * EXTENSION_RATIO

                l_wrist_pt = (float(l_wrist[0]), float(l_wrist[1]))
                r_wrist_pt = (float(r_wrist[0]), float(r_wrist[1]))

                # 2. Head-drop logic: small shoulder-to-nose vertical distance
                #    means the head has sunk toward the shoulders.
                if nose[0] != 0 and (l_shoulder[0] != 0 or r_shoulder[0] != 0):
                    sy = (l_shoulder[1] + r_shoulder[1]) / 2 if (l_shoulder[0]*r_shoulder[0])!=0 else max(l_shoulder[1], r_shoulder[1])
                    dist = sy - nose[1]
                    cv2.putText(annotated_frame, f"Head Dist: {int(dist)}", (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,0), 1)
                    if dist < HEAD_DROP_THRESHOLD: is_head_drop = True

                # 3. Face-box estimate (simple, from keypoints):
                #    take min/max over nose, eyes and ears.
                face_pts = kpts[0:5, :2] # nose, eyes, ears
                valid_pts = face_pts[face_pts[:,0] > 0]
                if len(valid_pts) > 0:
                    x1, y1 = valid_pts.min(axis=0); x2, y2 = valid_pts.max(axis=0)
                    pad = 20
                    face_box = [max(0, x1-pad), max(0, y1-pad), min(W, x2+pad), min(H, y2+pad + 50)] # +50 downward to cover the chin
                    cv2.rectangle(annotated_frame, (int(face_box[0]), int(face_box[1])), (int(face_box[2]), int(face_box[3])), (255,255,0), 2)

                # 4. Wrist near mouth: either extended wrist inside the face box.
                if face_box:
                    if l_wrist_conf > WRIST_CONF_THRESHOLD and is_point_in_box(l_wrist_pt, face_box): is_hand_near_mouth = True
                    if r_wrist_conf > WRIST_CONF_THRESHOLD and is_point_in_box(r_wrist_pt, face_box): is_hand_near_mouth = True

        except Exception as e:
            # Best-effort: a pose failure must not kill the stream.
            print(f"Pose Error: {e}")
            pass

        return is_head_drop, is_hand_near_mouth, annotated_frame, l_wrist_pt, r_wrist_pt, face_box, (l_wrist_conf, r_wrist_conf)

    def process_frame(self, frame):
        """Process one RGB webcam frame and return the annotated RGB frame.

        Pipeline: detection -> pose features -> streak/activity bookkeeping ->
        sliding-window vote -> HUD drawing.
        """
        # Gradio passes RGB; OpenCV needs BGR.
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        H, W = frame.shape[:2]

        # 1. Object detection (eyes/mouth/Hand_Mouth classes).
        res_det = model_det.predict(frame, conf=CONF_THRESHOLD, verbose=False)[0]

        # 2. Pose features.
        is_head_drop, is_hand_in_box, pose_frame, lw, rw, face_box, wrist_confs = self.get_pose_features(frame)

        # 3. Detection-side hand-near-mouth (simple overlap check).
        det_hand_mouth_near = False
        try:
            for box, cls in zip(res_det.boxes.xyxy, res_det.boxes.cls):
                if model_det.names[int(cls)] == "Hand_Mouth" and face_box:
                    # Simple check: box center inside the face box.
                    bx1, by1, bx2, by2 = box.cpu().numpy()
                    cx, cy = (bx1+bx2)/2, (by1+by2)/2
                    if is_point_in_box((cx, cy), face_box):
                        det_hand_mouth_near = True
        except: pass  # NOTE(review): bare except keeps the stream alive, but also hides bugs

        # 4. Streak counters: require a few consecutive frames before flagging.
        if is_hand_in_box: self.pose_hand_streak += 1
        else: self.pose_hand_streak = 0

        if det_hand_mouth_near: self.det_hand_streak += 1
        else: self.det_hand_streak = 0

        flag_hand_mouth = (self.pose_hand_streak >= HAND_STREAK_REQUIRED) or (self.det_hand_streak >= HAND_STREAK_REQUIRED)

        # 5. Activity: store wrists only when confidently detected.
        self.l_wrist_hist.append(lw if (wrist_confs[0] or 0) >= WRIST_CONF_THRESHOLD else None)
        self.r_wrist_hist.append(rw if (wrist_confs[1] or 0) >= WRIST_CONF_THRESHOLD else None)

        is_active = self.analyze_activity_level(self.l_wrist_hist) or self.analyze_activity_level(self.r_wrist_hist)

        # 6. Combined logic (simplified): class labels present this frame.
        labels_set = set([model_det.names[int(c)] for c in res_det.boxes.cls.cpu().numpy()]) if hasattr(res_det.boxes, 'cls') else set()

        # Instant per-frame drowsy check (suppressed while a hand covers the face,
        # since eye/mouth detections are unreliable then).
        is_drowsy_frame = False
        if not flag_hand_mouth:
            if "Closed_Eyes" in labels_set and "Open_Mouth" in labels_set: is_drowsy_frame = True
            elif "Closed_Eyes" in labels_set and "Closed_Mouth" in labels_set: is_drowsy_frame = True
            elif is_head_drop: is_drowsy_frame = True

        # 7. Sliding-window vote: activity or hand-on-face counts as non-drowsy.
        if is_active or flag_hand_mouth:
            self.history.append(0)
        else:
            self.history.append(1 if is_drowsy_frame else 0)

        drowsy_score = sum(self.history)
        is_drowsy_window = (drowsy_score >= WINDOW_THRESHOLD)

        # 8. Visualization: status text/color by priority.
        status_text = "AMAN"
        status_color = (0, 255, 0)

        if flag_hand_mouth:
            status_text = "BAHAYA! (Hand on Face)"
            status_color = (0, 0, 255)
        elif is_drowsy_window:
            status_text = "BAHAYA! (Mengantuk)"
            status_color = (0, 0, 255)
        elif is_active:
            status_text = "AMAN (Aktif)"
            status_color = (0, 255, 0)

        # Draw HUD.
        annotated = res_det.plot() # draw detection boxes

        # Overlay pose features.
        if face_box:
            cv2.rectangle(annotated, (int(face_box[0]), int(face_box[1])), (int(face_box[2]), int(face_box[3])), (255,255,0), 2)

        if lw: cv2.circle(annotated, (int(lw[0]), int(lw[1])), 5, (0,255,255), -1)
        if rw: cv2.circle(annotated, (int(rw[0]), int(rw[1])), 5, (0,255,255), -1)

        # Top status bar.
        cv2.rectangle(annotated, (0, 0), (annotated.shape[1], 80), (0, 0, 0), -1)
        cv2.putText(annotated, f"STATUS: {status_text}", (20, 35), 0, 0.8, status_color, 2)
        cv2.putText(annotated, f"Score: {drowsy_score}/{WINDOW_SIZE} | Act: {is_active}", (20, 65), 0, 0.6, (200, 200, 200), 1)

        return cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
231
+
232
# --- GRADIO INTERFACE ---
detector = DrowsinessDetector()

def inference(frame):
    """Gradio callback: run the drowsiness detector on one webcam frame.

    *frame* is an RGB ndarray (or None when no frame is available).
    """
    return None if frame is None else detector.process_frame(frame)
238
+
239
if __name__ == "__main__":
    # Build and launch the Gradio UI.
    with gr.Blocks() as demo:
        gr.Markdown("# 😴 AI Drowsiness Detector (YOLOv11)")
        gr.Markdown("Pastikan webcam aktif. Deteksi mengantuk berbasis Pose & Object Detection.")

        with gr.Row():
            # Webcam input; streaming=True so frames arrive in real time.
            input_video = gr.Image(sources=["webcam"], streaming=True, label="Webcam Input")
            output_video = gr.Image(label="AI Output")

        # On every webcam update (each frame), run inference.
        input_video.change(fn=inference, inputs=input_video, outputs=output_video)

    demo.launch()