File size: 2,737 Bytes
06d31de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
# facial_diagnostics.py


import cv2
import numpy as np
import math
from statistics import median

# Facial metric helpers
# MediaPipe FaceMesh landmark indices consumed by the metric functions below.
# Each eye list holds six points in the order eye_aspect_ratio() expects
# (p0..p5; p0/p3 span the horizontal width, the other pairs the vertical gaps).
LEFT_EYE_IDX = [33, 160, 158, 133, 153, 144]
RIGHT_EYE_IDX = [362, 385, 387, 263, 373, 380]
# Unpacked by mouth_ratio() as [top, bottom, left, right].
MOUTH_IDX = [13, 14, 78, 308]
# Mouth-corner landmarks used by smile_symmetry().
SMILE_LEFT = 61
SMILE_RIGHT = 291

def eye_aspect_ratio(pts, idx):
    """Return the eye aspect ratio (EAR) for one eye.

    Args:
        pts: sequence of (x, y) landmark coordinates.
        idx: six landmark indices ordered [p0..p5], where p0/p3 are the
            horizontal eye corners and (p1, p5), (p2, p4) are vertical pairs.

    Returns:
        float: (|p1-p5| + |p2-p4|) / (2 * |p0-p3|). Smaller values indicate
        a more closed eye.
    """
    a = np.linalg.norm(np.array(pts[idx[1]]) - np.array(pts[idx[5]]))
    b = np.linalg.norm(np.array(pts[idx[2]]) - np.array(pts[idx[4]]))
    # BUG FIX: the original computed np.array(idx[3]) — the raw landmark
    # *index* — instead of the landmark *point* pts[idx[3]], which corrupted
    # the horizontal eye-width denominator. The 1e-8 guards division by zero.
    c = np.linalg.norm(np.array(pts[idx[0]]) - np.array(pts[idx[3]])) + 1e-8
    return float((a + b) / (2.0 * c))

def mouth_ratio(pts):
    """Return mouth openness: vertical lip gap divided by mouth width.

    Unpacks MOUTH_IDX as [top, bottom, left, right]; the 1e-8 term guards
    against division by zero when the corners coincide.
    """
    top_pt = np.asarray(pts[MOUTH_IDX[0]], dtype=float)
    bottom_pt = np.asarray(pts[MOUTH_IDX[1]], dtype=float)
    left_pt = np.asarray(pts[MOUTH_IDX[2]], dtype=float)
    right_pt = np.asarray(pts[MOUTH_IDX[3]], dtype=float)
    opening = np.linalg.norm(top_pt - bottom_pt)
    width = np.linalg.norm(left_pt - right_pt) + 1e-8
    return float(opening / width)

def head_tilt_angle(pts):
    """Return the signed roll angle in degrees of the line joining the eyes.

    Each eye center is the average of its two corner landmarks
    (33/133 left, 362/263 right); 0 means the eyes are level.
    """
    left_center = (np.asarray(pts[33], dtype=float) + np.asarray(pts[133], dtype=float)) / 2.0
    right_center = (np.asarray(pts[362], dtype=float) + np.asarray(pts[263], dtype=float)) / 2.0
    dx, dy = right_center - left_center
    return float(math.degrees(math.atan2(dy, dx)))

def smile_symmetry(pts):
    """Return a symmetry score in [0, 1] for the mouth corners (1.0 = symmetric).

    Compares each mouth corner's distance from a face-centered reference point.

    BUG FIX: the original used the midpoint of the two mouth corners themselves
    as the reference, making L == R by construction — the function returned
    1.0 for every input. We now anchor on the midpoint of the inner eye
    corners (landmarks 133 / 362), which does not move with the mouth, so
    genuine left/right asymmetry lowers the score.
    """
    left = np.asarray(pts[SMILE_LEFT], dtype=float)
    right = np.asarray(pts[SMILE_RIGHT], dtype=float)
    # Face midline reference: midpoint of the inner eye corners.
    center = (np.asarray(pts[133], dtype=float) + np.asarray(pts[362], dtype=float)) / 2.0
    L = np.linalg.norm(left - center)
    R = np.linalg.norm(right - center)
    if L + R == 0:
        # Degenerate case: both corners sit exactly on the reference point.
        return 1.0
    return float(min(L, R) / max(L, R))

# Face mesh
# Module-level MediaPipe FaceMesh shared by analyze_frame(). Constructed once
# at import time (a module-level side effect): video mode
# (static_image_mode=False), at most one face, refine_landmarks enabled,
# with 0.6 detection/tracking confidence thresholds.
import mediapipe as mp
mp_face_mesh = mp.solutions.face_mesh
FACE_MESH = mp_face_mesh.FaceMesh(
    static_image_mode=False,
    max_num_faces=1,
    refine_landmarks=True,
    min_detection_confidence=0.6,
    min_tracking_confidence=0.6
)

def analyze_frame(frame):
    """Run the shared FaceMesh on one BGR frame and compute per-frame metrics.

    Args:
        frame: H x W x 3 BGR image (OpenCV convention).

    Returns:
        {"found": False} when no face is detected, otherwise a dict with
        "found", "ear", "mouth_ratio", "head_tilt", and "smile_sym".
    """
    height, width, _channels = frame.shape
    # MediaPipe expects RGB input.
    result = FACE_MESH.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

    if not result.multi_face_landmarks:
        return {"found": False}

    # Convert normalized landmark coordinates to integer pixel positions.
    landmarks = result.multi_face_landmarks[0].landmark
    pts = [(int(p.x * width), int(p.y * height)) for p in landmarks]

    mean_ear = (eye_aspect_ratio(pts, LEFT_EYE_IDX) +
                eye_aspect_ratio(pts, RIGHT_EYE_IDX)) / 2

    return {
        "found": True,
        "ear": float(mean_ear),
        "mouth_ratio": float(mouth_ratio(pts)),
        "head_tilt": float(head_tilt_angle(pts)),
        "smile_sym": float(smile_symmetry(pts))
    }

def aggregate_metrics(metrics):
    """Collapse a list of per-frame metric dicts into per-session medians.

    Args:
        metrics: list of dicts as produced by analyze_frame() (found faces).

    Returns:
        {} when the list is empty; otherwise a dict with "count" and the
        median of each metric.
    """
    if not metrics:
        return {}
    summary = {"count": len(metrics)}
    for out_key, in_key in (
        ("ear_median", "ear"),
        ("mouth_median", "mouth_ratio"),
        ("tilt_median", "head_tilt"),
        ("smile_median", "smile_sym"),
    ):
        summary[out_key] = median(m[in_key] for m in metrics)
    return summary