| import cv2 |
| import time |
| import json |
| import sys |
| import os |
| import torch |
|
|
| |
# Make the project root importable: this file lives three levels below it,
# so walk up from this module's own directory and prepend the result.
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.abspath(os.path.join(current_dir, "..", "..", ".."))
sys.path.insert(0, project_root)
|
|
| |
def get_test_device():
    """Return the torch device string to use ("cuda" or "cpu"), logging the choice."""
    cuda_ok = torch.cuda.is_available()
    device = "cuda" if cuda_ok else "cpu"
    if cuda_ok:
        print("Using CUDA GPU for analysis")
    else:
        print("Using CPU for analysis")
    return device
|
|
| |
| DEVICE = get_test_device() |
|
|
| |
class TestBodyLanguageAnalyzer:
    """Stub body-language analyzer used to exercise the video pipeline.

    Sets up a MediaPipe Pose estimator but does not yet derive metrics from
    it: ``process_frame`` only counts frames, and the aggregate stats and
    interview assessment are hard-coded placeholder values. Useful for
    testing the video loop and the CPU/GPU round-trip end to end.
    """

    def __init__(self):
        # Imported lazily so merely importing this module does not require
        # mediapipe to be installed.
        import mediapipe as mp
        self.mp_pose = mp.solutions.pose
        self.pose = self.mp_pose.Pose(
            static_image_mode=False,   # video mode: track landmarks across frames
            model_complexity=1,
            smooth_landmarks=True,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5
        )
        # Per-frame metrics returned by process_frame (currently always empty).
        self.frame_metrics = {}
        # Aggregate counters. NOTE(review): the non-zero percentages (32.0,
        # 19.0) look like deliberate test fixtures — confirm they are intended.
        self.stats = {
            "total_frames": 0,
            "shoulder_misalignment_percentage": 32.0,
            "leaning_forward_percentage": 0.0,
            "head_tilt_percentage": 19.0,
            "arms_crossed_percentage": 0.0,
            "self_touch_percentage": 0.0,
            "fidgeting_percentage": 0.0,
            "pose_shifts_per_minute": 0.0
        }

    def process_frame(self, frame, annotate=False):
        """Count one frame and return ``(frame_metrics, frame)``.

        When running on CUDA, a throwaway tensor op is executed purely as a
        device smoke test. Bug fix: the op's result is no longer written back
        into ``frame`` — the previous code returned every pixel incremented
        by 1 (wrapping uint8 255 -> 0), silently corrupting the image shown
        to the user. ``annotate`` is accepted for interface compatibility but
        is currently unused.
        """
        if DEVICE == "cuda" and torch.cuda.is_available():
            try:
                tensor = torch.from_numpy(frame).to(DEVICE)
                _ = tensor + 1  # exercise the device; discard the result
            except Exception as e:
                # Best-effort smoke test: log and continue on CPU data.
                print(f"GPU operation failed: {e}")

        self.stats["total_frames"] += 1
        return self.frame_metrics, frame

    def get_stats(self):
        """Return the aggregate statistics dict (placeholder values)."""
        return self.stats

    def get_interview_assessment(self):
        """Return a hard-coded placeholder interview assessment."""
        return {
            "confidence_score": 6,
            "engagement_score": 5,
            "comfort_score": 8,
            "overall_score": 6.0,
            "strengths": ["Appears calm and composed through minimal nervous movements"],
            "areas_for_improvement": ["Uneven shoulders may convey tension"],
            "recommendations": ["Practice maintaining level shoulders"]
        }
|
|
| |
def analyze_body_language_test(frame, analyzer=None, annotate=False):
    """Run one frame through a body-language analyzer.

    Creates a fresh TestBodyLanguageAnalyzer when none is supplied, so the
    caller can thread the same analyzer through successive frames.
    Returns ``(metrics, analyzer, processed_frame)``.
    """
    active = TestBodyLanguageAnalyzer() if analyzer is None else analyzer
    metrics, processed = active.process_frame(frame, annotate)
    return metrics, active, processed
|
|
def _draw_overlay(frame, progress, processing_fps, minutes, seconds):
    """Draw the progress / FPS / device / time HUD onto `frame` in place."""
    cv2.putText(frame, f"Progress: {progress:.1f}%",
                (20, 140), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
    cv2.putText(frame, f"Processing FPS: {processing_fps:.1f}",
                (20, 170), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
    cv2.putText(frame, f"Device: {DEVICE}",
                (20, 200), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
    cv2.putText(frame, f"Time: {minutes:02d}:{seconds:02d}",
                (20, 230), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)


def _print_report(stats, assessment):
    """Print the statistics and assessment sections of the report to stdout."""
    print("\n--- Body Language Statistics ---")
    print(f"Total frames analyzed: {stats['total_frames']}")
    print(f"Shoulder misalignment percentage: {stats['shoulder_misalignment_percentage']:.2f}%")
    print(f"Leaning forward percentage: {stats['leaning_forward_percentage']:.2f}%")
    print(f"Head tilt percentage: {stats['head_tilt_percentage']:.2f}%")
    print(f"Arms crossed percentage: {stats['arms_crossed_percentage']:.2f}%")
    print(f"Self-touch percentage: {stats['self_touch_percentage']:.2f}%")
    print(f"Fidgeting percentage: {stats['fidgeting_percentage']:.2f}%")
    print(f"Pose shifts per minute: {stats['pose_shifts_per_minute']:.2f}")

    print("\n--- Assessment ---")
    print(f"Confidence Score: {assessment['confidence_score']}/10")
    print(f"Engagement Score: {assessment['engagement_score']}/10")
    print(f"Comfort Score: {assessment['comfort_score']}/10")
    print(f"Overall Score: {assessment['overall_score']}/10")

    print("\nStrengths:")
    for strength in assessment['strengths']:
        print(f"- {strength}")

    print("\nAreas for Improvement:")
    for area in assessment['areas_for_improvement']:
        print(f"- {area}")

    print("\nRecommendations:")
    for recommendation in assessment['recommendations']:
        print(f"- {recommendation}")


def analyze_video_file(video_path, display_video=True, save_results=True):
    """
    Analyze body language in a video file and get statistics.

    Args:
        video_path: Path to the video file
        display_video: Whether to display the video during analysis
        save_results: Whether to save results to a JSON file

    Returns:
        dict: Body language statistics and assessment, or None if the
        video could not be opened.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print(f"Error: Could not open video file {video_path}")
        return None

    fps = cap.get(cv2.CAP_PROP_FPS)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Guard against containers that report 0 FPS.
    duration = frame_count / fps if fps > 0 else 0

    print(f"Analyzing video: {video_path}")
    print(f"Video properties: {frame_count} frames, {fps:.2f} FPS, {duration:.2f} seconds")
    print(f"Using device: {DEVICE}")

    analyzer = TestBodyLanguageAnalyzer()
    frame_number = 0

    # Rolling measurement of how fast *we* process frames (distinct from the
    # video's own FPS).
    prev_time = time.time()
    fps_counter = 0
    processing_fps = 0

    # try/finally guarantees the capture (and any window) is released even if
    # frame processing raises mid-video — the original leaked it on error.
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            metrics, analyzer, annotated_frame = analyze_body_language_test(
                frame, analyzer, display_video)

            fps_counter += 1
            current_time = time.time()
            if current_time - prev_time >= 1.0:
                processing_fps = fps_counter / (current_time - prev_time)
                fps_counter = 0
                prev_time = current_time

            frame_number += 1
            progress = (frame_number / frame_count) * 100 if frame_count > 0 else 0
            print(f"\rProgress: {progress:.1f}% (Frame {frame_number}/{frame_count})", end="")

            # Position within the video, for the on-screen clock.
            current_video_time = frame_number / fps if fps > 0 else 0
            minutes = int(current_video_time // 60)
            seconds = int(current_video_time % 60)

            if display_video:
                _draw_overlay(annotated_frame, progress, processing_fps, minutes, seconds)
                cv2.imshow("Body Language Analysis", annotated_frame)
                # Allow the user to abort with 'q'.
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
    finally:
        cap.release()
        if display_video:
            cv2.destroyAllWindows()

    print("\nAnalysis complete!")

    stats = analyzer.get_stats()
    assessment = analyzer.get_interview_assessment()

    results = {
        "video_info": {
            "path": video_path,
            "frames": frame_count,
            "fps": fps,
            "duration_seconds": duration,
            "device_used": DEVICE
        },
        "body_language_stats": stats,
        "assessment": assessment
    }

    if save_results:
        # Bug fix: the old "split('/')" approach broke on Windows path
        # separators and truncated file names containing extra dots; derive
        # the stem portably with os.path instead.
        stem = os.path.splitext(os.path.basename(video_path))[0]
        output_file = stem + '_body_language_analysis.json'
        with open(output_file, 'w') as f:
            json.dump(results, f, indent=2)
        print(f"Results saved to {output_file}")

    _print_report(stats, assessment)

    return results
|
|
if __name__ == "__main__":
    # Hard-coded sample upload used for manual testing; point this at a
    # different clip to analyze another video.
    sample_video = "../../static/uploads/30a350b2-704d-4af3-89d3-567e3e2296bd.mp4"
    analyze_video_file(sample_video, display_video=True, save_results=True)