import cv2
import time
import json
import os
import sys

# Make the project root importable so `app.services...` resolves when this
# file is run directly as a script (two directories up from this file).
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(os.path.dirname(current_dir))
sys.path.insert(0, parent_dir)
from app.services.processing.body_language_analyzer import InterviewAnalyzer, DEVICE
def analyze_video_file(video_path, display_video=True, save_results=True):
    """
    Analyze interview performance (eye contact and body language) in a video file.

    Args:
        video_path: Path to the video file.
        display_video: Whether to display the annotated video during analysis.
        save_results: Whether to save results to a JSON file in the current
            working directory.

    Returns:
        dict: Comprehensive interview assessment, or None if the video file
        could not be opened.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print(f"Error: Could not open video file {video_path}")
        return None

    fps = cap.get(cv2.CAP_PROP_FPS)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Guard against containers that report fps == 0.
    duration = frame_count / fps if fps > 0 else 0

    print(f"Analyzing video: {video_path}")
    print(f"Video properties: {frame_count} frames, {fps:.2f} FPS, {duration:.2f} seconds")
    print(f"Using device: {DEVICE}")

    analyzer = InterviewAnalyzer()
    frame_number = 0

    # Throughput tracking for the on-screen "Processing FPS" readout.
    prev_time = time.time()
    fps_counter = 0
    processing_fps = 0

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            metrics, annotated_frame = analyzer.process_frame(frame, display_video)

            # Recompute processing FPS roughly once per wall-clock second.
            fps_counter += 1
            current_time = time.time()
            if current_time - prev_time >= 1.0:
                processing_fps = fps_counter / (current_time - prev_time)
                fps_counter = 0
                prev_time = current_time

            frame_number += 1
            progress = (frame_number / frame_count) * 100 if frame_count > 0 else 0
            print(f"\rProgress: {progress:.1f}% (Frame {frame_number}/{frame_count})", end="")

            if display_video:
                current_video_time = frame_number / fps if fps > 0 else 0
                _draw_overlay(annotated_frame, progress, processing_fps, current_video_time)
                cv2.imshow("Interview Analysis", annotated_frame)
                # Allow the user to abort the analysis early with 'q'.
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
    finally:
        # Always release the capture (and any window) even if processing raises.
        cap.release()
        if display_video:
            cv2.destroyAllWindows()

    print("\nAnalysis complete!")

    assessment = analyzer.get_comprehensive_assessment()

    # Attach source-video metadata so saved results are self-describing.
    assessment['video_info'] = {
        "path": video_path,
        "frames": frame_count,
        "fps": fps,
        "duration_seconds": duration,
        "device_used": DEVICE
    }

    if save_results:
        # os.path handles both '/' and '\\' separators and filenames that
        # contain extra dots (a naive split('/')/split('.') does not).
        base_name = os.path.splitext(os.path.basename(video_path))[0]
        output_file = base_name + '_interview_analysis.json'
        with open(output_file, 'w') as f:
            json.dump(assessment, f, indent=2)
        print(f"Results saved to {output_file}")

    _print_assessment(assessment)
    return assessment


def _draw_overlay(frame, progress, processing_fps, video_time_seconds):
    """Draw progress / FPS / device / timestamp text onto *frame* in place."""
    minutes = int(video_time_seconds // 60)
    seconds = int(video_time_seconds % 60)
    overlay_lines = [
        (f"Progress: {progress:.1f}%", 140),
        (f"Processing FPS: {processing_fps:.1f}", 170),
        (f"Device: {DEVICE}", 200),
        (f"Time: {minutes:02d}:{seconds:02d}", 230),
    ]
    for text, y in overlay_lines:
        cv2.putText(frame, text, (20, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)


def _print_assessment(assessment):
    """Print the human-readable summary of an interview assessment dict."""
    print("\n--- Interview Assessment ---")
    print(f"Overall Score: {assessment['overall_score']:.1f}/10")
    print(f"Total frames analyzed: {assessment['key_statistics']['total_frames']}")
    print(f"Analysis duration: {assessment['key_statistics']['total_duration_seconds']:.2f} seconds")

    print("\n--- Eye Contact Statistics ---")
    print(f"Eye Contact Score: {assessment['eye_contact']['score']}/10")
    print(f"Eye contact percentage: {assessment['key_statistics']['eye_contact_percentage']:.2f}%")
    print(f"Longest eye contact: {assessment['key_statistics']['longest_eye_contact_seconds']:.2f} seconds")
    print(f"Average contact duration: {assessment['key_statistics']['average_contact_duration_seconds']:.2f} seconds")

    print("\nEye Contact Patterns:")
    for pattern in assessment['eye_contact']['patterns']:
        print(f"- {pattern}")

    print("\nEye Contact Recommendations:")
    for rec in assessment['eye_contact']['recommendations']:
        print(f"- {rec}")

    print("\n--- Body Language Statistics ---")
    print(f"Confidence Score: {assessment['body_language']['confidence_score']}/10")
    print(f"Engagement Score: {assessment['body_language']['engagement_score']}/10")
    print(f"Comfort Score: {assessment['body_language']['comfort_score']}/10")

    print("\nBody Language Strengths:")
    for strength in assessment['body_language']['strengths']:
        print(f"- {strength}")

    print("\nBody Language Areas for Improvement:")
    for area in assessment['body_language']['areas_for_improvement']:
        print(f"- {area}")

    print("\nBody Language Recommendations:")
    for rec in assessment['body_language']['recommendations']:
        print(f"- {rec}")
| |
|
| | if __name__ == "__main__": |
| | |
| | video_path = "../../static/uploads/30a350b2-704d-4af3-89d3-567e3e2296bd.mp4" |
| | |
| | analyze_video_file(video_path, display_video=True, save_results=True) |