# behavior_backend/tests/services/test_interview_analyzer.py
# Origin: commit 8ae78b0 "Add backend application and Dockerfile" (hibatorrahmen)
import cv2
import time
import json
import os
import sys
# Add the project root to sys.path
# Make the project root importable so this script can be run directly
# (two dirname() hops climb from tests/services/ up to the package root
# that contains the `app` package — presumably behavior_backend/; verify).
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(os.path.dirname(current_dir))
sys.path.insert(0, parent_dir)
# NOTE(review): despite the test_* filename this is a manual/visual harness,
# not a pytest module — it displays video and writes JSON as side effects.
from app.services.processing.body_language_analyzer import InterviewAnalyzer, DEVICE
def analyze_video_file(video_path, display_video=True, save_results=True):
    """
    Analyze interview performance (eye contact and body language) in a video file.

    Args:
        video_path: Path to the video file.
        display_video: Whether to display the annotated video during analysis.
        save_results: Whether to save the assessment to a JSON file
            ("<video basename>_interview_analysis.json" in the working directory).

    Returns:
        dict: Comprehensive interview assessment, or None if the video could
        not be opened.
    """
    # Open the video file
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print(f"Error: Could not open video file {video_path}")
        return None

    # Video properties; some containers report 0 FPS, so guard divisions.
    fps = cap.get(cv2.CAP_PROP_FPS)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    duration = frame_count / fps if fps > 0 else 0
    print(f"Analyzing video: {video_path}")
    print(f"Video properties: {frame_count} frames, {fps:.2f} FPS, {duration:.2f} seconds")
    print(f"Using device: {DEVICE}")

    analyzer = InterviewAnalyzer()
    frame_number = 0
    # Wall-clock bookkeeping for the processing-FPS estimate.
    prev_time = time.time()
    fps_counter = 0
    processing_fps = 0

    try:
        # Process each frame until the stream ends or the user quits.
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            metrics, annotated_frame = analyzer.process_frame(frame, display_video)

            # Refresh the processing-FPS estimate roughly once per second.
            fps_counter += 1
            current_time = time.time()
            if current_time - prev_time >= 1.0:
                processing_fps = fps_counter / (current_time - prev_time)
                fps_counter = 0
                prev_time = current_time

            frame_number += 1
            progress = (frame_number / frame_count) * 100 if frame_count > 0 else 0
            # flush=True so the \r progress line shows up on line-buffered stdout.
            print(f"\rProgress: {progress:.1f}% (Frame {frame_number}/{frame_count})", end="", flush=True)

            # Current position within the video, as mm:ss.
            current_video_time = frame_number / fps if fps > 0 else 0
            minutes = int(current_video_time // 60)
            seconds = int(current_video_time % 60)

            if display_video:
                _draw_overlay(annotated_frame, progress, processing_fps, minutes, seconds)
                cv2.imshow("Interview Analysis", annotated_frame)
                # Break if 'q' is pressed
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
    finally:
        # Always release the capture (and any window), even if processing raises.
        cap.release()
        if display_video:
            cv2.destroyAllWindows()

    print("\nAnalysis complete!")

    # Get comprehensive assessment and attach the source-video metadata.
    assessment = analyzer.get_comprehensive_assessment()
    assessment['video_info'] = {
        "path": video_path,
        "frames": frame_count,
        "fps": fps,
        "duration_seconds": duration,
        "device_used": DEVICE
    }

    if save_results:
        # Derive the output name portably. The previous '/'-split broke on
        # Windows paths, and split('.')[0] truncated at the FIRST dot instead
        # of stripping the extension.
        base = os.path.splitext(os.path.basename(video_path))[0]
        output_file = base + '_interview_analysis.json'
        with open(output_file, 'w') as f:
            json.dump(assessment, f, indent=2)
        print(f"Results saved to {output_file}")

    _print_assessment(assessment)
    return assessment


def _draw_overlay(annotated_frame, progress, processing_fps, minutes, seconds):
    """Draw progress, FPS, device, and timestamp overlays onto the frame."""
    cv2.putText(annotated_frame, f"Progress: {progress:.1f}%",
                (20, 140), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
    cv2.putText(annotated_frame, f"Processing FPS: {processing_fps:.1f}",
                (20, 170), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
    cv2.putText(annotated_frame, f"Device: {DEVICE}",
                (20, 200), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
    cv2.putText(annotated_frame, f"Time: {minutes:02d}:{seconds:02d}",
                (20, 230), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)


def _print_assessment(assessment):
    """Print a human-readable summary of the comprehensive assessment dict."""
    # Print key statistics
    print("\n--- Interview Assessment ---")
    print(f"Overall Score: {assessment['overall_score']:.1f}/10")
    print(f"Total frames analyzed: {assessment['key_statistics']['total_frames']}")
    print(f"Analysis duration: {assessment['key_statistics']['total_duration_seconds']:.2f} seconds")

    # Print eye contact statistics
    print("\n--- Eye Contact Statistics ---")
    print(f"Eye Contact Score: {assessment['eye_contact']['score']}/10")
    print(f"Eye contact percentage: {assessment['key_statistics']['eye_contact_percentage']:.2f}%")
    print(f"Longest eye contact: {assessment['key_statistics']['longest_eye_contact_seconds']:.2f} seconds")
    print(f"Average contact duration: {assessment['key_statistics']['average_contact_duration_seconds']:.2f} seconds")

    print("\nEye Contact Patterns:")
    for pattern in assessment['eye_contact']['patterns']:
        print(f"- {pattern}")
    print("\nEye Contact Recommendations:")
    for rec in assessment['eye_contact']['recommendations']:
        print(f"- {rec}")

    # Print body language statistics
    print("\n--- Body Language Statistics ---")
    print(f"Confidence Score: {assessment['body_language']['confidence_score']}/10")
    print(f"Engagement Score: {assessment['body_language']['engagement_score']}/10")
    print(f"Comfort Score: {assessment['body_language']['comfort_score']}/10")

    print("\nBody Language Strengths:")
    for strength in assessment['body_language']['strengths']:
        print(f"- {strength}")
    print("\nBody Language Areas for Improvement:")
    for area in assessment['body_language']['areas_for_improvement']:
        print(f"- {area}")
    print("\nBody Language Recommendations:")
    for rec in assessment['body_language']['recommendations']:
        print(f"- {rec}")
if __name__ == "__main__":
    # Manual run against a sample upload; shows the annotated video and
    # writes the JSON assessment alongside the working directory.
    analyze_video_file(
        "../../static/uploads/30a350b2-704d-4af3-89d3-567e3e2296bd.mp4",
        display_video=True,
        save_results=True,
    )