# Liedetector / app.py
# Author: viswanani — "Update app.py" (commit 180d000, verified)
import cv2
from deepface import DeepFace
import gradio as gr
import os
def analyze_emotion_from_video(video_path):
    """Detect the dominant facial emotion in the middle frame of a video.

    Args:
        video_path: Filesystem path to the uploaded video file.

    Returns:
        A human-readable status string: either the dominant emotion on
        success, or an error message prefixed with "❌".
    """
    if not video_path or not os.path.exists(video_path):
        return "❌ Error: No video file found or path is invalid."

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return "❌ Error: Could not open the video file."

    # Release the capture on every exit path — the original leaked it when
    # frame reading failed, because release() only ran after the analysis try.
    try:
        # Sample the middle frame: cheap, and more likely than frame 0 to
        # show a settled facial expression.
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_count // 2)
        success, frame = cap.read()
    finally:
        cap.release()

    if not success or frame is None:
        return "❌ Error: Could not read a valid frame from the video."

    try:
        # enforce_detection=False keeps DeepFace from raising when no face
        # is confidently detected; it then analyzes the whole frame.
        analysis = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
        # DeepFace >= 0.0.75 returns a list of results (one per face);
        # older versions return a single dict — accept both shapes.
        result = analysis[0] if isinstance(analysis, list) else analysis
        dominant_emotion = result['dominant_emotion']
        return f"βœ… Dominant Emotion: {dominant_emotion}"
    except Exception as e:
        # Best-effort UI: surface the failure as text rather than crashing
        # the Gradio worker.
        return f"❌ Error during analysis: {str(e)}"
# Wire the analysis function into a simple one-input/one-output Gradio UI.
# `interface` stays at module level so Space runners (and importers) can
# find it; launch() is guarded so importing this module has no side effect.
interface = gr.Interface(
    fn=analyze_emotion_from_video,
    inputs=gr.Video(label="Upload a Video File"),
    outputs=gr.Textbox(label="Emotion Analysis Result"),
    title="🎭 Video Emotion Detection",
    description="Upload a short video with a visible face to detect the dominant emotion using DeepFace.",
)

if __name__ == "__main__":
    interface.launch()