vaniac committed on
Commit
6c4675b
·
verified ·
1 Parent(s): 9fc6194

Upload 4 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ shape_predictor_68_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import cv2
3
+ import numpy as np
4
+ import dlib
5
+ from scipy.spatial import distance as dist
6
+ from imutils import face_utils
7
+
8
+ # Constants
9
+ EYE_AR_THRESH = 0.3
10
+ EYE_AR_CONSEC_FRAMES = 30
11
+ YAWN_THRESH = 20
12
+
13
+ # Global variables
14
+ COUNTER = 0 # Global COUNTER defined here
15
+
16
+ # Functions
17
def eye_aspect_ratio(eye):
    """Return the eye aspect ratio (EAR) of a 6-point eye landmark array.

    EAR is the mean of the two vertical lid distances divided by the
    horizontal eye width; it shrinks toward zero as the eye closes.
    """
    vertical_a = dist.euclidean(eye[1], eye[5])
    vertical_b = dist.euclidean(eye[2], eye[4])
    horizontal = dist.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
23
+
24
def final_ear(shape):
    """Average the EAR over both eyes of a 68-point landmark array.

    Returns a tuple ``(ear, leftEye, rightEye)`` where the eye arrays are
    the landmark slices used for the computation (handy for drawing hulls).
    """
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    left_eye, right_eye = shape[lStart:lEnd], shape[rStart:rEnd]
    mean_ear = (eye_aspect_ratio(left_eye) + eye_aspect_ratio(right_eye)) / 2.0
    return (mean_ear, left_eye, right_eye)
33
+
34
def lip_distance(shape):
    """Return the absolute vertical gap between upper and lower lip.

    Uses the inner/outer lip landmarks of a 68-point array: the mean y of
    points 50-52 and 61-63 (top) versus points 56-58 and 65-67 (bottom).
    """
    upper_lip = np.concatenate((shape[50:53], shape[61:64]))
    lower_lip = np.concatenate((shape[56:59], shape[65:68]))
    upper_y = np.mean(upper_lip, axis=0)[1]
    lower_y = np.mean(lower_lip, axis=0)[1]
    return abs(upper_y - lower_y)
43
+
44
def process_frame(frame, detector, predictor):
    """Process the frame and detect drowsiness and yawning.

    Runs the Haar-cascade *detector* on a grayscale copy, fits the dlib
    landmark *predictor* on every face found, then draws eye/lip contours
    and the DROWSINESS / YAWN overlays directly onto *frame*.  Mutates and
    returns *frame*.
    """
    global COUNTER  # Declare COUNTER as global here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE,
    )

    for (x, y, w, h) in faces:
        # Convert the Haar bounding box into the rectangle dlib expects.
        face_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
        landmarks = face_utils.shape_to_np(predictor(gray, face_rect))

        ear, left_eye, right_eye = final_ear(landmarks)
        mouth_gap = lip_distance(landmarks)

        # Outline both eyes (convex hulls) and the outer lip in green.
        for eye in (left_eye, right_eye):
            cv2.drawContours(frame, [cv2.convexHull(eye)], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [landmarks[48:60]], -1, (0, 255, 0), 1)

        if ear < EYE_AR_THRESH:
            # Eyes look closed: only alert after enough consecutive frames.
            COUNTER += 1
            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                cv2.putText(frame, "DROWSINESS", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        else:
            COUNTER = 0

        if mouth_gap > YAWN_THRESH:
            cv2.putText(frame, "YAWN", (10, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # Live numeric readouts in the top-right corner.
        cv2.putText(frame, f"EAR: {ear:.2f}", (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        cv2.putText(frame, f"YAWN: {mouth_gap:.2f}", (300, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

    return frame
86
+
87
# Load detector and predictor
detector = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

# Streamlit UI
st.title("Sleep Detection using OpenCV")
st.markdown("**Check the box below to start the camera:**")

run = st.checkbox("Run Camera")

# Video capture
if run:
    cap = cv2.VideoCapture(0)
    FRAME_WINDOW = st.image([])
    try:
        if not cap.isOpened():
            # Opening device 0 can fail silently; report it up front instead
            # of misreporting it as a frame-read failure inside the loop.
            st.error("Failed to open webcam.")
        else:
            while True:
                ret, frame = cap.read()
                if not ret:
                    st.error("Failed to read frame from webcam.")
                    break

                frame = cv2.resize(frame, (450, 300))
                frame = process_frame(frame, detector, predictor)  # Process frame here
                FRAME_WINDOW.image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    finally:
        # Bug fix: the capture handle was never released, leaking the camera
        # across Streamlit reruns (the device can stay busy for other apps).
        cap.release()
else:
    st.info("Check 'Run Camera' to start detection.")
haarcascade_frontalface_default.xml ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ streamlit
2
+ opencv-python-headless
3
+ numpy
4
+ dlib
5
+ imutils
6
+ scipy
shape_predictor_68_face_landmarks.dat ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fbdc2cb80eb9aa7a758672cbfdda32ba6300efe9b6e6c7a299ff7e736b11b92f
3
+ size 99693937