# emotion-recognition-api/tools/webcam_model_test.py
# Source: Digambar29's repository, commit 05fbb64 ("Don't really know the changes").
import cv2
import time
from PIL import Image
from inference import predict
# --- Tunables -------------------------------------------------------------
CONF_THRESHOLD = 0.6   # minimum model confidence required to latch a new emotion
DECAY_SECONDS = 2.0    # how long a stale emotion is displayed before resetting

# Sticky state: the last confidently predicted emotion and when it was seen.
current_emotion = None
last_confident_ts = None

# CAP_DSHOW selects the DirectShow backend (avoids slow MSMF startup on
# Windows; ignored on other platforms).
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
if not cap.isOpened():
    # Fail fast with a clear message instead of looping on failed reads.
    raise RuntimeError("Could not open webcam (device 0)")

cv2.namedWindow("Emotion Debug View", cv2.WINDOW_NORMAL)
print("Press 'q' to quit")

try:
    while True:
        ret, frame = cap.read()
        if not ret:
            print("Failed to grab frame")
            break

        # OpenCV frames are BGR; the model expects an RGB PIL image.
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        pil_img = Image.fromarray(rgb)
        result = predict(pil_img)

        now = time.time()
        if result["confidence"] >= CONF_THRESHOLD:
            # Confident prediction: latch the emotion and refresh the timestamp.
            current_emotion = result["emotion"]
            last_confident_ts = now
        elif last_confident_ts is not None and (now - last_confident_ts > DECAY_SECONDS):
            # No confident prediction for DECAY_SECONDS: drop the stale label.
            current_emotion = None

        conf = round(result["confidence"], 2)
        label = f"{current_emotion if current_emotion else 'UNKNOWN'} | conf={conf}"
        state = "STABLE" if current_emotion else "DECAYED"
        cv2.putText(
            frame,
            f"State: {state} | {label}",
            (20, 40),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.9,
            (0, 255, 0) if current_emotion else (0, 0, 255),  # green = live, red = decayed
            2,
        )

        cv2.imshow("Emotion Debug View", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
finally:
    # Always release the camera and tear down windows, even on exceptions.
    cap.release()
    cv2.destroyAllWindows()