Clocksp committed on
Commit
f1a4ede
·
1 Parent(s): 98fc1b9

Initial commit

Browse files
Files changed (4) hide show
  1. app.py +59 -0
  2. model.pkl +3 -0
  3. requirements.txt +5 -0
  4. util.py +46 -0
app.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import cv2
import numpy as np
import pickle
from util import get_face_landmarks

# Class labels, ordered to match the classifier's integer class indices.
emotions = ['HAPPY', 'SAD', 'SURPRISED']

# Deserialize the trained classifier once at import time so each
# prediction request does not pay the model-loading cost.
# NOTE(review): pickle.load on an untrusted file can execute arbitrary
# code — acceptable here only because model.pkl ships with the app.
with open('model.pkl', 'rb') as f:
    model = pickle.load(f)
14
def predict_emotion(image):
    """Classify the facial emotion in *image* and annotate the frame.

    Parameters
    ----------
    image : PIL.Image
        RGB image supplied by the Gradio input component.

    Returns
    -------
    tuple
        (label-to-confidence dict for the Label output, RGB numpy image
        for the Image output). When no face is detected, the original
        image is returned unchanged with a sentinel label.
    """
    # Gradio hands us a PIL image in RGB; OpenCV works in BGR.
    img = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    # Flattened landmark coordinates for the first detected face.
    face_landmarks = get_face_landmarks(img, draw=False, static_image_mode=True)

    # BUGFIX: get_face_landmarks returns an empty list (not None) when no
    # face is found, so the previous `is None` guard never fired and
    # model.predict crashed on an empty feature vector. Use a falsy check.
    if not face_landmarks:
        return {"No face detected": 1.0}, image

    # Map the classifier's integer output back to its label.
    output = model.predict([face_landmarks])
    predicted_emotion = emotions[int(output[0])]

    # Prefer real per-class probabilities when the model exposes them;
    # otherwise report full confidence in the single predicted class.
    if hasattr(model, "predict_proba"):
        probs = model.predict_proba([face_landmarks])[0]
        confidence_dict = {emotions[i]: float(probs[i]) for i in range(len(emotions))}
    else:
        confidence_dict = {predicted_emotion: 1.0}

    # Draw the winning label in the bottom-left corner of the frame.
    cv2.putText(img, predicted_emotion, (10, img.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    # Convert back to RGB so Gradio renders the colors correctly.
    return confidence_dict, cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
40
+
41
# Sample inputs shown beneath the interface; the files must exist
# under the examples/ directory.
examples = [
    ["examples/happy.png"],
    ["examples/sad.png"],
    ["examples/surprised.png"],
]

# Wire the predictor into a simple upload-or-webcam demo: one image in,
# a confidence breakdown plus an annotated image out.
demo = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Image(type="pil", label="Upload Image or Use Webcam",
                    sources=["upload", "webcam"]),
    outputs=[
        gr.Label(num_top_classes=3, label="Predicted Emotion & Confidence"),
        gr.Image(type="numpy", label="Annotated Image"),
    ],
    title="Emotion Detector",
    description="Upload an image or use webcam to detect emotions (HAPPY, SAD, SURPRISED).",
    examples=examples,
    theme="default",
)

if __name__ == "__main__":
    demo.launch()
model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:766b2b73840a165978ed0258c4d77a7ff8b34129d5fac5b30d4b8c3503f8b354
3
+ size 18684691
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ tensorflow==2.15.0
2
+ mediapipe==0.10.21
3
+ numpy==1.26.4
4
+ opencv-python-headless==4.8.1.78
5
+ scikit-learn==1.3.2
util.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import mediapipe as mp
3
+
4
def get_face_landmarks(image, draw=False, static_image_mode=True):
    """Extract min-shifted face-mesh landmark coordinates from a BGR image.

    Parameters
    ----------
    image : numpy.ndarray
        BGR image (OpenCV convention). Mutated in place when *draw* is True.
    draw : bool
        When True, draw the detected mesh contours onto *image*.
    static_image_mode : bool
        Forwarded to MediaPipe FaceMesh; True treats each call as an
        independent still image.

    Returns
    -------
    list[float]
        x, y, z per landmark of the first detected face, each axis
        shifted so its minimum is 0; an empty list when no face is found.
    """
    # MediaPipe expects RGB input; OpenCV supplies BGR.
    image_input_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Run the solution as a context manager so its native resources are
    # released deterministically (previously a FaceMesh instance was
    # created per call and never closed).
    with mp.solutions.face_mesh.FaceMesh(static_image_mode=static_image_mode,
                                         max_num_faces=1,
                                         min_detection_confidence=0.5) as face_mesh:
        results = face_mesh.process(image_input_rgb)

    image_landmarks = []

    if results.multi_face_landmarks:
        if draw:
            mp_drawing = mp.solutions.drawing_utils
            drawing_spec = mp_drawing.DrawingSpec(thickness=2, circle_radius=1)
            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=results.multi_face_landmarks[0],
                connections=mp.solutions.face_mesh.FACEMESH_CONTOURS,
                landmark_drawing_spec=drawing_spec,
                connection_drawing_spec=drawing_spec)

        ls_single_face = results.multi_face_landmarks[0].landmark
        xs_ = [lm.x for lm in ls_single_face]
        ys_ = [lm.y for lm in ls_single_face]
        zs_ = [lm.z for lm in ls_single_face]

        # Hoist the minima out of the loop: the original recomputed
        # min() for every landmark, making this pass O(n^2).
        min_x, min_y, min_z = min(xs_), min(ys_), min(zs_)
        for j in range(len(xs_)):
            image_landmarks.append(xs_[j] - min_x)
            image_landmarks.append(ys_[j] - min_y)
            image_landmarks.append(zs_[j] - min_z)

    return image_landmarks