Haryiank committed
Commit c08f9a0 · verified · 1 Parent(s): a6517df

Upload 14 files

Dockerfile ADDED
@@ -0,0 +1,9 @@
+ FROM python:3.10
+
+ WORKDIR /code
+ COPY . .
+
+ RUN pip install --upgrade pip
+ RUN pip install -r requirements.txt
+
+ CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
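For a quick local check outside Docker, a minimal sketch that mirrors the CMD above (it assumes uvicorn is installed and is run from the repository root so that app.main:app resolves):

# run_local.py - hypothetical local launcher, mirrors the container CMD
import uvicorn

if __name__ == "__main__":
    # Same module path, host, and port as the Dockerfile uses
    uvicorn.run("app.main:app", host="0.0.0.0", port=7860)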
app/calibration_task.py ADDED
@@ -0,0 +1,66 @@
+ import cv2
+ import time
+ import mediapipe as mp
+ import numpy as np
+ import json
+ from microexpression_tracker import track_microexpressions
+
+ mp_face_mesh = mp.solutions.face_mesh
+ face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=2)
+ CALIBRATION_DURATION = 10  # seconds
+
+ print("\n🟩 Welcome to SafeSpace!")
+ print("Press SPACE to begin calibration. Position your face within the webcam frame and keep it still.")
+ cap = cv2.VideoCapture(0)
+
+ # Wait for SPACE key to start
+ while True:
+     ret, frame = cap.read()
+     if not ret:
+         continue
+     cv2.putText(frame, "Press SPACE to start calibration", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2)
+     cv2.imshow("Calibration", frame)
+     if cv2.waitKey(1) & 0xFF == 32:
+         break
+
+ print(f"\n🟡 Calibration started. Please follow the instructions for {CALIBRATION_DURATION} seconds.")
+
+ start_time = time.time()
+ eye_centers = []
+
+ while time.time() - start_time < CALIBRATION_DURATION:
+     ret, frame = cap.read()
+     if not ret:
+         continue
+
+     micro, face_bbox, multiple_faces = track_microexpressions(frame, face_mesh, {})
+
+     h, w, _ = frame.shape
+
+     if face_bbox:
+         cv2.rectangle(frame, (face_bbox[0], face_bbox[1]), (face_bbox[2], face_bbox[3]), (0,255,0), 2)
+     if multiple_faces:
+         cv2.putText(frame, "Multiple faces detected!", (20, h-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 3)
+
+     if face_bbox:
+         eye_centers.append((face_bbox[0] + face_bbox[2]) / 2 / w)
+
+     elapsed = int(time.time() - start_time)
+     cv2.putText(frame, f"Calibration: {CALIBRATION_DURATION-elapsed}s left", (20, 450), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
+     cv2.imshow("Calibration", frame)
+     if cv2.waitKey(1) & 0xFF == ord('q'):
+         break
+
+ print("\n✅ Calibration finished. Thank you!")
+ cap.release()
+ cv2.destroyAllWindows()
+
+ user_calib = {
+     'eye_left': np.percentile(eye_centers, 5) if eye_centers else 0.35,
+     'eye_right': np.percentile(eye_centers, 95) if eye_centers else 0.65
+ }
+
+ with open('user_calibration.json', 'w') as f:
+     json.dump(user_calib, f)
+
+ print("\n🟢 Ready for session. Please start the main stress detection.")
app/final_facial_model.py ADDED
@@ -0,0 +1,133 @@
+ import cv2
+ import joblib
+ import time
+ import json
+ from microexpression_tracker import track_microexpressions, get_lip_engagement
+ from collections import Counter
+ import mediapipe as mp
+ import numpy as np
+
+ # --- Load microexpression calibration ---
+ with open('user_calibration.json', 'r') as f:
+     calibration_ref = json.load(f)
+
+ # --- Load your ELM model and scaler ---
+ # (Commented out for now)
+ # elm_model = joblib.load("src/model_fer.pkl")
+ # scaler = joblib.load("src/scaler_fer.pkl")
+
+ IMG_SIZE = 96  # use same as in training
+ SESSION_DURATION = 15  # seconds
+ EYE_AWAY_THRESHOLD = 20
+ HEAD_TURN_THRESHOLD = 20
+
+ def preprocess_for_model(frame):
+     img = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))
+     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+     img = img.astype(np.float32) / 255.0
+     img = img.flatten().reshape(1, -1)
+     return img
+
+ cap = cv2.VideoCapture(0)
+
+ print("\n📸 Welcome! Press SPACE to start stress & engagement analysis.")
+ while True:
+     ret, frame = cap.read()
+     if not ret:
+         continue
+     cv2.putText(frame, "Press SPACE to start", (60, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2)
+     cv2.imshow("Webcam", frame)
+     if cv2.waitKey(1) & 0xFF == 32:
+         break
+
+ print("\n🟡 Session started. Look at the screen for 15 seconds.")
+ session_start = time.time()
+
+ mp_face_mesh = mp.solutions.face_mesh
+ face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=1)
+
+ eye_away_count = 0
+ head_turn_count = 0
+ engagement_predictions = []  # Not used while the ELM is commented out, but kept for completeness
+ lip_engagement_predictions = []
+
+ while time.time() - session_start < SESSION_DURATION:
+     ret, frame = cap.read()
+     if not ret:
+         continue
+
+     # --- Microexpression detection ---
+     micro, face_bbox, multiple_faces = track_microexpressions(frame, face_mesh, calibration_ref)
+
+     # --- Lip engagement detection ---
+     h, w, _ = frame.shape
+     frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+     results = face_mesh.process(frame_rgb)
+     lip_engagement = "No Face"
+     if results.multi_face_landmarks:
+         landmarks = [(lm.x, lm.y) for lm in results.multi_face_landmarks[0].landmark]
+         lip_engagement = get_lip_engagement(landmarks)
+         lip_engagement_predictions.append(lip_engagement)
+         if face_bbox:
+             cv2.rectangle(frame, (face_bbox[0], face_bbox[1]), (face_bbox[2], face_bbox[3]), (0,255,0), 2)
+     else:
+         lip_engagement_predictions.append("No Face")
+         landmarks = None
+
+     if micro["eye_away"]:
+         eye_away_count += 1
+     if micro["head_turn"]:
+         head_turn_count += 1
+
+     if multiple_faces:
+         cv2.putText(frame, "Multiple faces detected!", (20, 450), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 3)
+
+     elapsed = int(time.time() - session_start)
+     cv2.putText(frame, f"Time left: {SESSION_DURATION-elapsed}s", (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
+     cv2.putText(frame, f"Engagement: {lip_engagement}", (20, 130), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2)
+     cv2.imshow('SafeSpace Session', frame)
+
+     print(f"Frame: {len(lip_engagement_predictions)} | Engagement: {lip_engagement}")
+
+     if cv2.waitKey(1) & 0xFF == ord('q'):
+         break
+
+ cap.release()
+ cv2.destroyAllWindows()
+
+ # --- Summarize Lip Engagement Results (Safe & Robust) ---
+ lip_counts = Counter(lip_engagement_predictions)
+ # Remove "No Face" before reporting the majority label, if present
+ lip_counts_no_face = Counter({k: v for k, v in lip_counts.items() if k != "No Face"})
+ lip_total = sum(lip_counts_no_face.values())
+
+ if lip_total > 0:
+     for label in ["Engaged", "Partially Engaged", "Not Engaged"]:
+         print(f"Engagement {label}: {lip_counts_no_face.get(label,0)} frames ({(lip_counts_no_face.get(label,0)/lip_total)*100:.1f}%)")
+
+     lip_majority_label = lip_counts_no_face.most_common(1)[0][0]
+ else:
+     print("No valid engagement predictions to summarize.")
+     lip_majority_label = "No Face"
+
+ # --- Microexpression-based feedback ---
+ if 10 < eye_away_count < 20 and 10 < head_turn_count < 20:
+     print("\nEye distraction detected several times in this session.")
+     print("Head turn detected several times in this session.")
+
+ if eye_away_count > EYE_AWAY_THRESHOLD:
+     print("\nEye distraction detected many times in this session.")
+ if head_turn_count > HEAD_TURN_THRESHOLD:
+     print("Head turn detected many times in this session.")
+
+ # --- Final Hybrid Result (using only lips & microexpressions, as the ELM is off) ---
+ hybrid_result = lip_majority_label
+
+ # Apply microexpression override
+ if eye_away_count > EYE_AWAY_THRESHOLD or head_turn_count > HEAD_TURN_THRESHOLD:
+     if hybrid_result == "Engaged":
+         hybrid_result = "Partially Engaged"
+     elif hybrid_result == "Partially Engaged":
+         hybrid_result = "Not Engaged"
+
+ print(f"\n✅ Final Conclusion (with Microexpressions): {hybrid_result}")
app/main.py ADDED
@@ -0,0 +1,42 @@
+ # app/main.py
+
+ from fastapi import FastAPI, UploadFile
+ import numpy as np
+ import cv2
+ from app.calibration_task import generate_calibration_data
+ from app.final_facial_model import predict_stress_level
+
+ app = FastAPI()
+
+ CALIBRATION_SECONDS = 10
+ ANALYSIS_SECONDS = 30
+ FPS = 10
+
+ calib_frames = []
+ analysis_frames = []
+
+ @app.post("/calibrate")
+ async def calibrate(image: UploadFile):
+     contents = await image.read()
+     np_arr = np.frombuffer(contents, np.uint8)
+     frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
+     calib_frames.append(frame)
+
+     if len(calib_frames) >= CALIBRATION_SECONDS * FPS:
+         generate_calibration_data(calib_frames)
+         calib_frames.clear()
+         return {"status": "Calibration Complete"}
+     return {"status": f"Calibrating... {len(calib_frames)//FPS}s"}
+
+ @app.post("/analyze")
+ async def analyze(image: UploadFile):
+     contents = await image.read()
+     np_arr = np.frombuffer(contents, np.uint8)
+     frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
+     analysis_frames.append(frame)
+
+     if len(analysis_frames) >= ANALYSIS_SECONDS * FPS:
+         result = predict_stress_level(analysis_frames)
+         analysis_frames.clear()
+         return {"result": result}
+     return {"status": f"Analyzing... {len(analysis_frames)//FPS}s"}
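Each endpoint expects one encoded frame per request (roughly FPS requests per second from the client). A minimal client sketch, assuming the requests package, a server already running on localhost:7860, and a hypothetical frame file:

# client_sketch.py - hypothetical client for /calibrate and /analyze
import requests

BASE_URL = "http://localhost:7860"  # assumption: container from the Dockerfile above

def send_frame(endpoint, jpeg_path):
    # Upload one frame as multipart form data under the "image" field
    with open(jpeg_path, "rb") as f:
        return requests.post(f"{BASE_URL}/{endpoint}", files={"image": f}).json()

print(send_frame("calibrate", "frame_000.jpg"))  # e.g. {"status": "Calibrating... 0s"}
print(send_frame("analyze", "frame_000.jpg"))    # e.g. {"status": "Analyzing... 0s"}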
app/microexpression_tracker.py ADDED
@@ -0,0 +1,74 @@
+ import numpy as np
+ import cv2
+ import mediapipe as mp
+
+
+ LEFT_EYE = [33, 133]
+ RIGHT_EYE = [362, 263]
+ NOSE = 1
+
+ mp_face_mesh = mp.solutions.face_mesh
+ def get_lip_engagement(landmarks):
+     TOP_LIP = 13
+     BOTTOM_LIP = 14
+     LIP_LEFT = 78
+     LIP_RIGHT = 308
+     top_lip = landmarks[TOP_LIP]
+     bottom_lip = landmarks[BOTTOM_LIP]
+     left_corner = landmarks[LIP_LEFT]
+     right_corner = landmarks[LIP_RIGHT]
+     lip_opening = abs(top_lip[1] - bottom_lip[1])
+     lip_width = abs(right_corner[0] - left_corner[0])
+
+     # print(f"[DEBUG] lip_opening: {lip_opening:.3f}, lip_width: {lip_width:.3f}")
+
+     # Example thresholds; adjust to your measured values.
+     # Logic: high opening OR high width = Engaged (smile / open mouth),
+     # both very small = Not Engaged, everything else = Partially Engaged
+     if lip_opening > 0.01 or lip_width > 0.18:
+         return "Engaged"
+     elif lip_opening < 0.002 or lip_width < 0.04:
+         return "Not Engaged"
+     else:
+         return "Partially Engaged"
+
+
+
+
+
+
+ def track_microexpressions(frame, face_mesh, calibration_ref=None):
+     if calibration_ref is None:
+         calibration_ref = {}
+     h, w, _ = frame.shape
+     frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+     results = face_mesh.process(frame_rgb)
+     micro = {
+         "eye_away": False,
+         "head_turn": False,
+     }
+     face_bbox = None
+     multiple_faces = False
+
+     if results.multi_face_landmarks:
+         if len(results.multi_face_landmarks) > 1:
+             multiple_faces = True
+
+         lm = results.multi_face_landmarks[0].landmark
+         xs = [p.x for p in lm]
+         ys = [p.y for p in lm]
+         xmin, xmax = min(xs)*w, max(xs)*w
+         ymin, ymax = min(ys)*h, max(ys)*h
+         face_bbox = [int(xmin), int(ymin), int(xmax), int(ymax)]
+
+         eye_x = (lm[LEFT_EYE[0]].x + lm[RIGHT_EYE[0]].x) / 2
+         nose_x = lm[NOSE].x
+
+         margin = 0.07
+         eye_left_th = calibration_ref.get('eye_left', 0.30)
+         eye_right_th = calibration_ref.get('eye_right', 0.70)
+         if eye_x < (eye_left_th - margin) or eye_x > (eye_right_th + margin):
+             micro["eye_away"] = True
+         if nose_x < (eye_left_th - margin) or nose_x > (eye_right_th + margin):
+             micro["head_turn"] = True
+
+     return micro, face_bbox, multiple_faces
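A minimal sketch of how these helpers can be exercised on a single image (the image path is hypothetical; with an empty calibration dict the default 0.30 / 0.70 eye thresholds apply):

# one-frame check of the tracker helpers (hypothetical image path)
import cv2
import mediapipe as mp
from microexpression_tracker import track_microexpressions, get_lip_engagement

face_mesh = mp.solutions.face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1)
frame = cv2.imread("sample_face.jpg")

micro, face_bbox, multiple_faces = track_microexpressions(frame, face_mesh, {})
print(micro, face_bbox, multiple_faces)

results = face_mesh.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
if results.multi_face_landmarks:
    landmarks = [(lm.x, lm.y) for lm in results.multi_face_landmarks[0].landmark]
    print(get_lip_engagement(landmarks))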
app/model_files/label_encoder_fer.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec6798a50709c0ea32b0d6ac9817d4b45f2645d02b4f3cea8da303c3ae9438da
+ size 531
app/model_files/model_fer.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f70ac0fc7d4c7783efbdfb5e7ed7c2b46dfd0e2ab0db3a7d4f75498005226bdf
+ size 6780643
app/model_files/scaler_fer.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cedfac8ecc967834e963ddb617ff07f815c96bd3655629542870decf1363211
+ size 68199
app/model_files/user_calibration.json ADDED
@@ -0,0 +1 @@
+ {"eye_left": 0.54296875, "eye_right": 0.559375}
app/simple_elm.py ADDED
@@ -0,0 +1,46 @@
+
+ import numpy as np
+
+ class SimpleELMClassifier:
+     def __init__(self, n_hidden=300, activation='relu', random_state=42):
+         self.n_hidden = n_hidden
+         self.activation = activation
+         self.random_state = random_state
+         self.is_fitted = False
+
+     def _activation(self, X):
+         if self.activation == 'sigmoid':
+             return 1 / (1 + np.exp(-X))
+         elif self.activation == 'tanh':
+             return np.tanh(X)
+         elif self.activation == 'relu':
+             return np.maximum(0, X)
+         else:
+             raise ValueError('Unsupported activation')
+
+     def fit(self, X, y):
+         np.random.seed(self.random_state)
+         n_samples, n_features = X.shape
+         n_classes = len(np.unique(y))
+         self.classes_ = np.unique(y)
+         Y = np.zeros((n_samples, n_classes))
+         for i, label in enumerate(self.classes_):
+             Y[y == label, i] = 1
+
+         self.W = np.random.randn(n_features, self.n_hidden)
+         self.b = np.random.randn(self.n_hidden)
+         H = self._activation(np.dot(X, self.W) + self.b)
+         self.beta = np.dot(np.linalg.pinv(H), Y)
+         self.is_fitted = True
+
+     def predict_proba(self, X):
+         if not self.is_fitted:
+             raise Exception("Model not fitted yet.")
+         H = self._activation(np.dot(X, self.W) + self.b)
+         logits = np.dot(H, self.beta)
+         e_logits = np.exp(logits - np.max(logits, axis=1, keepdims=True))
+         return e_logits / np.sum(e_logits, axis=1, keepdims=True)
+
+     def predict(self, X):
+         proba = self.predict_proba(X)
+         return np.argmax(proba, axis=1)
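A quick sanity-check sketch of SimpleELMClassifier on random data (shapes are illustrative; 2048 matches the pooled ResNet50 feature size used elsewhere in this upload):

# illustrative fit/predict round trip on synthetic features
import numpy as np
from simple_elm import SimpleELMClassifier

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2048))    # e.g. pooled ResNet50 feature vectors
y = rng.integers(0, 8, size=200)    # 8 emotion classes

clf = SimpleELMClassifier(n_hidden=300, activation='relu')
clf.fit(X, y)

print(clf.predict_proba(X[:5]).shape)  # (5, 8)
print(clf.predict(X[:5]))              # column indices into clf.classes_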
app/utils.py ADDED
@@ -0,0 +1,61 @@
+ import numpy as np
+ from tensorflow.keras.applications.resnet50 import preprocess_input
+ from tensorflow.keras.preprocessing.image import img_to_array
+ from tensorflow.keras.applications import ResNet50
+ import joblib
+ from simple_elm import SimpleELMClassifier
+ import cv2
+ from microexpression_tracker import track_microexpressions
+ import time
+ import json
+
+ class_names = ['anger', 'contempt', 'disgust', 'fear', 'happiness', 'neutral', 'sadness', 'surprise']
+
+ resnet = ResNet50(weights='imagenet', include_top=False, input_shape=(96, 96, 3), pooling='avg')
+
+ model_fer = joblib.load("src/model_fer.pkl")
+ scaler_fer = joblib.load("src/scaler_fer.pkl")
+ # model_ck = joblib.load("src/model_ck.pkl")
+ # scaler_ck = joblib.load("src/scaler_ck.pkl")
+
+ def map_emotion_to_engagement(emotion_label):
+     # accept both FER-style labels ('happy') and the class_names spelling ('happiness') used above
+     if emotion_label in ['happy', 'happiness', 'surprise']:
+         return "Engaged"
+     elif emotion_label in ['neutral']:
+         return "Partially Engaged"
+     else:
+         return "Not Engaged"
+
+
+ def preprocess_image_for_resnet(image, target_size=(96, 96)):
+     import cv2
+     image = cv2.resize(image, target_size)
+     image = img_to_array(image)
+     image = np.expand_dims(image, axis=0)
+     return preprocess_input(image)
+
+ def predict_emotion(frame):
+     try:
+         preprocessed = preprocess_image_for_resnet(frame)
+         features = resnet.predict(preprocessed, verbose=0).flatten().reshape(1, -1)
+         feat_fer = scaler_fer.transform(features)
+         probs_fer = model_fer.predict_proba(feat_fer)[0]
+         # Get the index of the class with the highest probability
+         idx = np.argmax(probs_fer)
+         final_label = class_names[idx]
+         print(f"[DEBUG] Emotion probabilities: {dict(zip(class_names, np.round(probs_fer, 3)))}")
+         print(f"[DEBUG] Detected emotion: {final_label}")
+         return final_label
+     except Exception as e:
+         print(f"[ERROR] Prediction failed: {e}")
+         return "error"
+
+
+ def predict_engagement_class(frame):
+     emotion_label = predict_emotion(frame)
+     print(f"[DEBUG] Detected emotion: {emotion_label}")
+     engagement_label = map_emotion_to_engagement(emotion_label)
+     return engagement_label
+
+
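A minimal usage sketch for these helpers on one stored image (the path is hypothetical, and the pickled model and scaler must exist at the src/ paths loaded above):

# single-image engagement check (hypothetical image path)
import cv2
from utils import predict_engagement_class

frame = cv2.imread("sample_face.jpg")
if frame is not None:
    print(predict_engagement_class(frame))  # "Engaged", "Partially Engaged" or "Not Engaged"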
app/webcam_inference.py ADDED
@@ -0,0 +1,92 @@
+ # webcam_inference.py
+
+ import cv2
+ import numpy as np
+ import joblib
+ from tensorflow.keras.applications import ResNet50
+ from tensorflow.keras.applications.resnet50 import preprocess_input
+ from tensorflow.keras.preprocessing.image import img_to_array
+ from utils import map_emotion_to_engagement
+ import os
+ # from hpelm import ELM  # unused; hpelm is not listed in requirements.txt
+ import joblib
+ from simple_elm import SimpleELMClassifier
+ import joblib
+ model_fer = joblib.load("src/model_fer.pkl")
+ scaler_fer = joblib.load("src/scaler_fer.pkl")
+
+ # Load models and scalers
+ # elm=ELM(2048,7)
+ # model_fer = joblib.load("src/model_fer.pkl")
+ # scaler_fer = joblib.load("src/scaler_fer.pkl")
+ # model_ck = joblib.load("src/model_ck.pkl")
+ # scaler_ck = joblib.load("src/scaler_ck.pkl")
+
+ # Class index to emotion label
+ class_names = ['anger', 'contempt', 'disgust', 'fear', 'happiness', 'neutral', 'sadness', 'surprise']
+
+ # Load ResNet50 model
+ resnet = ResNet50(weights='imagenet', include_top=False, input_shape=(96, 96, 3), pooling='avg')
+
+ # Preprocess frame
+ def preprocess_frame(frame):
+     frame = cv2.resize(frame, (96, 96))
+     frame = img_to_array(frame)
+     frame = np.expand_dims(frame, axis=0)
+     return preprocess_input(frame)
+
+ # Predict function with neutral class bypass logic
+ def predict_emotion(frame):
+     try:
+         # Extract features
+         preprocessed = preprocess_frame(frame)
+         features = resnet.predict(preprocessed, verbose=0).flatten().reshape(1, -1)
+
+         # Scale features
+         feat_fer = scaler_fer.transform(features)
+         # feat_ck = scaler_ck.transform(features)  # disabled: scaler_ck is not loaded above
+
+         # Get probabilities
+         probs_fer = model_fer.predict_proba(feat_fer)[0]
+         # probs_ck = model_ck.predict_proba(feat_ck)[0]
+
+         label_fer = class_names[np.argmax(probs_fer)]
+
+         # If FER predicts neutral, use only FER
+         final_probs = probs_fer
+
+         final_label = class_names[np.argmax(final_probs)]
+         return final_label
+
+     except Exception as e:
+         print(f"[ERROR] Prediction failed: {e}")
+         return "error"
+
+ # Main webcam loop
+ def run_webcam():
+     cap = cv2.VideoCapture(0)
+     print("[INFO] Webcam started. Press 'q' to quit.")
+
+     while True:
+         ret, frame = cap.read()
+         if not ret:
+             break
+
+         flipped = cv2.flip(frame, 1)  # Flip for mirror view
+         emotion = predict_emotion(flipped)
+         engagement = map_emotion_to_engagement(emotion)
+
+         # Overlay
+         cv2.putText(flipped, f"Emotion: {emotion}", (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
+         cv2.putText(flipped, f"Engagement: {engagement}", (10, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
+
+         cv2.imshow("FINAL FACIAL MODEL FOR SAFESPACE", flipped)
+
+         if cv2.waitKey(1) & 0xFF == ord('q'):
+             break
+
+     cap.release()
+     cv2.destroyAllWindows()
+
+ if __name__ == "__main__":
+     run_webcam()
huggingface.yaml ADDED
@@ -0,0 +1 @@
+ sdk: docker
requirements.txt ADDED
@@ -0,0 +1,19 @@
+
+ mediapipe
+ tensorflow
+ transformers
+ matplotlib
+ tqdm
+ imutils
+ dlib
+ Pillow
+
+
+ fastapi
+ uvicorn
+ numpy
+ opencv-python
+ scikit-learn
+ joblib
+ dlib
+ imutils