File size: 30,115 Bytes
8ae78b0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
import logging
import time
import json
import pandas as pd
import cv2
from pathlib import Path
from typing import Dict, Any, Optional, Tuple
import os
import concurrent.futures

from app.utils.logging_utils import time_it, setup_logger
from app.utils.data_utils import json_to_dataframe
from app.core.config import settings
from app.services.processing.speech_service import SpeechService
from app.services.processing.emotion_analyzer import EmotionAnalyzer
from app.services.processing.ai_analysis import AIAnalysisService
from app.services.processing.eye_contact_analyzer import analyze_video_file as analyze_eye_contact_video
from app.services.processing.body_language_analyzer import analyze_video_file as analyze_body_language_video
from app.services.processing.ai_face_analyzer import AIFaceAnalyzer


# Configure logging
logger = setup_logger(__name__)

class VideoProcessor:
    """Service for processing videos.

    Orchestrates speech-to-text, frame-level emotion analysis, and optional
    eye-contact / body-language / face analyses for a single video file, runs
    them in parallel, then feeds the combined data to the AI analysis service
    and returns (transcript, analysis_json).
    """

    def __init__(self):
        """Initialize the video processor and its sub-services."""
        self.speech_service = SpeechService()
        self.emotion_analyzer = EmotionAnalyzer()
        self.ai_analysis_service = AIAnalysisService()

    @time_it
    def process_video(
        self,
        video_path: str,
        frame_rate: int = 1,
        backend: str = 'mediapipe',
        language: str = 'en',
        generate_annotated_video: bool = False,
        video_id: Optional[str] = None,
        status_callback = None,
        min_face_confidence: float = 0.5,
        min_face_size_ratio: float = 0.05,
        save_emotion_stats: bool = True,
        skip_frames: int = 2,  # Default parameter, not used for frame sampling anymore
        adaptive_sampling: bool = False,  # Disable adaptive sampling to match test behavior
        analyze_eye_contact: bool = True,
        analyze_body_language: bool = True,
        analyze_face: bool = True,
        job_title: str = "Professional",
        model_name: str = "gpt-4o"
    ) -> Tuple[str, str]:
        """
        Process a video file for emotion analysis.

        Args:
            video_path: Path to the video file
            frame_rate: Process every nth frame (controls the sampling rate of frames for analysis)
            backend: Backend to use for face detection
            language: Language of the video
            generate_annotated_video: Whether to generate an annotated video
            video_id: ID of the video (optional; currently unused by this method)
            status_callback: Callback function for progress updates (called with an int percentage)
            min_face_confidence: Minimum confidence for face detection
            min_face_size_ratio: Minimum face size as ratio of image dimensions
            save_emotion_stats: Whether to save detailed emotion statistics as JSON
            skip_frames: Legacy parameter, kept for backward compatibility but not used
            adaptive_sampling: Whether to use adaptive sampling
            analyze_eye_contact: Whether to analyze eye contact
            analyze_body_language: Whether to analyze body language
            analyze_face: Whether to analyze face
            job_title: Job title for face analysis
            model_name: Model name passed to eye-contact analysis and AI analysis

        Returns:
            Tuple of (transcript, analysis_json)
        """
        start_time = time.time()

        # Add debug info about the video path
        logger.info(f"DEBUG - Processing video path: {video_path}")
        logger.info(f"DEBUG - Video file exists: {os.path.exists(video_path)}")
        if os.path.exists(video_path):
            logger.info(f"DEBUG - Video file size: {os.path.getsize(video_path) / (1024*1024):.2f} MB")

        # Create results directory if it doesn't exist
        results_dir = settings.RESULTS_DIR
        os.makedirs(results_dir, exist_ok=True)

        # Update status if callback provided
        if status_callback:
            status_callback(5)  # 5% progress

        logger.info(f"Processing video: {video_path}")
        logger.info(f"Using backend: {backend}")
        logger.info(f"Language: {language}")

        # Force mediapipe backend for best GPU performance on Mac M3
        if backend == 'opencv' or not backend or backend == "retinaface":
            logger.info(f"Backend '{backend}' doesn't support GPU acceleration or is not recommended.")
            logger.info(f"Switching to 'mediapipe' for GPU-accelerated frame analysis.")
            backend = "mediapipe"

        # Ensure we're using a GPU-compatible backend
        if backend not in ['mediapipe', 'ssd', 'mtcnn']:
            logger.info(f"Backend '{backend}' may not be optimized for GPU acceleration.")
            logger.info(f"Consider using 'mediapipe' for best GPU performance.")

        # --- Worker functions for parallel processing -------------------------
        # Each worker catches its own exceptions and degrades gracefully so a
        # single failing analysis never aborts the whole pipeline.

        def process_speech(video_path, language):
            """Run speech-to-text; returns "" on failure."""
            logger.info("Starting speech-to-text processing...")
            try:
                service = 'groq'  # transcription provider used by SpeechService
                transcript = self.speech_service.process_video_speech(video_path, language, service)
                logger.info(f"Speech-to-text completed. Text length: {len(transcript)} characters")
                return transcript
            except Exception as e:
                logger.error(f"Error during speech-to-text processing: {str(e)}")
                logger.warning("Continuing with empty transcript due to speech processing failure")
                return ""

        def process_eye_contact(video_path, model_name):
            """Run eye-contact analysis; returns None on failure."""
            logger.info("Starting eye contact analysis...")
            try:
                results = analyze_eye_contact_video(
                    video_path=video_path,
                    display_video=False,
                    save_results=False,
                    model_name=model_name
                )
                logger.info("Eye contact analysis completed successfully")
                return results
            except Exception as e:
                logger.error(f"Error during eye contact analysis: {str(e)}")
                logger.warning("Continuing without eye contact analysis")
                return None

        def process_body_language(video_path):
            """Run body-language analysis; returns None on failure."""
            logger.info("Starting body language analysis...")
            try:
                results = analyze_body_language_video(
                    video_path=video_path,
                    display_video=False,
                    save_results=False
                )
                logger.info("Body language analysis completed successfully")
                return results
            except Exception as e:
                logger.error(f"Error during body language analysis: {str(e)}")
                logger.warning("Continuing without body language analysis")
                return None

        def process_face_analysis(video_path, job_title):
            """Extract a few evenly spaced frames and run AI face analysis on them.

            Returns None on failure. Fix: the video capture is now released on
            every path (try/finally) and the temporary frame files are removed
            after analysis instead of leaking to disk on each call.
            """
            logger.info("Starting face analysis...")
            face_frames = []
            try:
                # Create a temp directory for extracted frames
                temp_frames_dir = Path("temp_face_frames")
                os.makedirs(temp_frames_dir, exist_ok=True)

                # Extract frames from the video
                cap = cv2.VideoCapture(video_path)
                if not cap.isOpened():
                    logger.error(f"Error: Could not open video file {video_path}")
                    return None
                try:
                    # Get video properties
                    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                    fps = cap.get(cv2.CAP_PROP_FPS)

                    # Extract 3 evenly distributed frames (at 1/4, 2/4, 3/4 of the video)
                    num_frames = 3
                    frame_indices = [int(i * frame_count / (num_frames + 1)) for i in range(1, num_frames + 1)]

                    for i, frame_idx in enumerate(frame_indices):
                        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
                        ret, frame = cap.read()
                        if ret:
                            # Name the file after its timestamp for readability
                            timestamp = frame_idx / fps if fps > 0 else 0
                            minutes = int(timestamp // 60)
                            seconds = int(timestamp % 60)
                            filename = f"frame_{i+1}_at_{minutes:02d}m{seconds:02d}s.jpg"
                            output_path = temp_frames_dir / filename

                            # Save frame
                            cv2.imwrite(str(output_path), frame)
                            face_frames.append(str(output_path))
                finally:
                    # Release the capture even if frame extraction raised.
                    cap.release()

                if not face_frames:
                    logger.warning("No frames were extracted for face analysis")
                    return None

                # Analyze extracted frames
                face_analyzer = AIFaceAnalyzer(provider="openai")
                face_analysis_results = face_analyzer.analyze_profile_pictures(face_frames, job_title)
                logger.info("Face analysis completed successfully")
                return face_analysis_results
            except Exception as e:
                logger.error(f"Error during face analysis: {str(e)}")
                logger.warning("Continuing without face analysis")
                return None
            finally:
                # Best-effort cleanup of extracted frames (previously leaked).
                for frame_path in face_frames:
                    try:
                        os.remove(frame_path)
                    except OSError:
                        pass

        def process_emotion_analysis(video_path, frame_rate, backend, generate_annotated_video, status_callback=None):
            """Run frame-level emotion analysis; returns ([], None, {}, {}) on failure."""
            logger.info(f"Starting emotion analysis with {backend} backend...")
            try:
                # Initialize emotion analyzer with custom parameters
                custom_emotion_analyzer = EmotionAnalyzer(
                    min_face_size_ratio=min_face_size_ratio,
                    min_confidence=min_face_confidence,
                    skip_similar_frames=False  # Explicitly disable frame similarity checks
                )

                # Use process_video_frames from EmotionAnalyzer
                all_results, annotated_video_path, timing_summary, metadata = custom_emotion_analyzer.process_video_frames(
                    video_path=video_path,
                    frame_rate=frame_rate,
                    backend=backend,
                    generate_annotated_video=generate_annotated_video,
                    status_callback=status_callback,  # Pass the received status_callback
                    adaptive_sampling=adaptive_sampling,
                    max_frames=1000
                )

                # Log timing summary and metadata for monitoring
                logger.info(f"Frame analysis timing summary: {timing_summary}")
                logger.info(f"Frame analysis metadata: {metadata}")
                logger.info(f"Total frames analyzed: {len(all_results)}")

                return all_results, annotated_video_path, timing_summary, metadata
            except Exception as e:
                logger.error(f"Error during emotion analysis: {str(e)}")
                return [], None, {}, {}

        # Execute tasks in parallel using ThreadPoolExecutor
        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
            # Start all tasks in parallel
            future_transcript = executor.submit(process_speech, video_path, language)

            futures = {}
            if analyze_eye_contact:
                futures['eye_contact'] = executor.submit(process_eye_contact, video_path, model_name)

            if analyze_body_language:
                futures['body_language'] = executor.submit(process_body_language, video_path)

            if analyze_face:
                futures['face'] = executor.submit(process_face_analysis, video_path, job_title)

            # Always submit emotion analysis
            futures['emotion'] = executor.submit(process_emotion_analysis, video_path, frame_rate, backend, generate_annotated_video, status_callback)

            # Wait for all tasks to complete and collect results
            transcript = future_transcript.result()

            eye_contact_results = futures['eye_contact'].result() if 'eye_contact' in futures else None
            body_language_results = futures['body_language'].result() if 'body_language' in futures else None
            face_analysis_results = futures['face'].result() if 'face' in futures else None

            all_results, annotated_video_path, timing_summary, metadata = futures['emotion'].result()

        # Update status after parallel processing
        if status_callback:
            status_callback(80)  # 80% progress

        # Fix: debug output routed through the logger (lazy %-args) instead of
        # raw print() calls dumping to stdout.
        logger.debug("Body language results: %s", body_language_results)
        logger.debug("Eye contact results: %s", eye_contact_results)

        # Check if we have any emotion results
        if not all_results:
            logger.warning("No emotions detected in any frames.")
            empty_results = {
                'backend': [],
                'eye_contact_analysis': eye_contact_results if eye_contact_results else {},
                'body_language_analysis': body_language_results if body_language_results else {},
                'face_analysis': face_analysis_results if face_analysis_results else {}
            }
            empty_results_json = json.dumps(empty_results)
            return transcript, empty_results_json

        # Calculate emotion statistics
        emotion_stats = self._calculate_emotion_statistics(all_results)

        # Video info data
        cap = cv2.VideoCapture(video_path)
        video_fps = cap.get(cv2.CAP_PROP_FPS)
        video_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        duration = video_frames / video_fps if video_fps > 0 else 0
        cap.release()

        # Create comprehensive results structure
        comprehensive_results = {
            "video_info": {
                "path": video_path,
                "frames": video_frames,
                "fps": video_fps,
                "duration_seconds": duration,
                "device_used": metadata.get("device", "unknown"),
                "backend": backend,
                "face_detection_params": {
                    "min_confidence": min_face_confidence,
                    "min_face_size_ratio": min_face_size_ratio
                }
            },
            "emotion_stats": emotion_stats,
            "frames_analyzed": len(all_results),
            "execution_stats": {
                "total_processing_time_seconds": timing_summary.get("total_time", 0),
                "avg_processing_time_seconds": timing_summary.get("avg_time_per_frame", 0),
                "timing_breakdown": {
                    "face_detection": metadata.get("detailed_timing", {}).get("face_detection", 0),
                    "emotion_analysis": metadata.get("detailed_timing", {}).get("emotion_analysis", 0),
                    "temporal_consistency": metadata.get("detailed_timing", {}).get("temporal_consistency", 0),
                    "cache_check": metadata.get("detailed_timing", {}).get("cache_check", 0),
                    "similarity_check": metadata.get("detailed_timing", {}).get("similarity_check", 0),
                    "total": timing_summary.get("avg_time_per_frame", 0)
                }
            }
        }

        # Add eye contact, body language, and face analysis results if available
        if eye_contact_results:
            comprehensive_results["eye_contact_analysis"] = eye_contact_results

        if body_language_results:
            comprehensive_results["body_language_analysis"] = body_language_results

        if face_analysis_results:
            comprehensive_results["face_analysis"] = face_analysis_results

        # Determine overall sentiment based on emotion_percentages
        dominant_emotion, _ = max(emotion_stats["emotion_percentages"].items(), key=lambda x: x[1], default=("neutral", 0))
        comprehensive_results["overall_sentiment"] = dominant_emotion.capitalize()

        # Fix: full JSON dump moved from stdout print() to debug logging.
        logger.debug("Comprehensive analysis results: %s", json.dumps(comprehensive_results, indent=2))

        # Process the results to ensure they have the required fields
        processed_results = self._process_emotion_results(all_results)

        # Convert results to DataFrame
        df = json_to_dataframe({'backend': processed_results})

        # Store original emotion data from emotion_stats on the DataFrame
        if emotion_stats["emotion_percentages"]:
            # Use the emotion_percentages data for all rows
            df['raw_emotion_data'] = [emotion_stats["emotion_percentages"]] * len(df)

            # Add confidence data as a separate field
            confidence_data = {
                "confidence_by_emotion": emotion_stats["confidence_by_emotion"],
                "average_confidence": emotion_stats["average_confidence"]
            }
            df['confidence_data'] = [confidence_data] * len(df)

            # Add overall sentiment to each row
            df['overall_sentiment'] = comprehensive_results["overall_sentiment"]

            logger.info(f"Added emotion percentages data to DataFrame: {emotion_stats['emotion_percentages']}")
            logger.info(f"Added confidence data to DataFrame: {confidence_data}")
            logger.info(f"Added overall sentiment to DataFrame: {comprehensive_results['overall_sentiment']}")
        else:
            logger.warning("No emotion data found to add to DataFrame")

        # Check if we have emotion data
        if df.empty:
            logger.warning("No emotions detected, cannot generate analysis.")
            # Fix: removed dead "'processed_results' not in locals()" guard —
            # processed_results is unconditionally assigned above.
            empty_results = {
                'backend': processed_results,
                'eye_contact_analysis': eye_contact_results if eye_contact_results else {},
                'body_language_analysis': body_language_results if body_language_results else {},
                'face_analysis': face_analysis_results if face_analysis_results else {}
            }
            empty_results_json = json.dumps(empty_results)
            return transcript, empty_results_json

        # Perform AI analysis
        logger.info("Starting AI analysis...")
        try:
            # Log the data being passed to the AI analysis
            if eye_contact_results:
                logger.info(f"Passing eye_contact_data to AI analysis with {len(str(eye_contact_results))} characters")
            else:
                logger.info("No eye_contact_data available to pass to AI analysis")

            if body_language_results:
                logger.info(f"Passing body_language_data to AI analysis with {len(str(body_language_results))} characters")
            else:
                logger.info("No body_language_data available to pass to AI analysis")

            if face_analysis_results:
                logger.info(f"Passing face_analysis_data to AI analysis with {len(str(face_analysis_results))} items")
            else:
                logger.info("No face_analysis_data available to pass to AI analysis")

            analysis = self.ai_analysis_service.analyze_emotions_and_transcript(
                df, 
                transcript, 
                language,
                eye_contact_data=eye_contact_results,
                body_language_data=body_language_results,
                face_analysis_data=face_analysis_results,
                model_name=model_name
            )
        except Exception as e:
            logger.error(f"Error during AI analysis: {str(e)}")
            results_with_error = {
                'backend': processed_results, 
                'error': str(e),
                'eye_contact_analysis': eye_contact_results if eye_contact_results else {},
                'body_language_analysis': body_language_results if body_language_results else {},
                'face_analysis': face_analysis_results if face_analysis_results else {}
            }
            results_json = json.dumps(results_with_error)
            return transcript, results_json

        # Update status
        if status_callback:
            status_callback(100)  # 100% progress

        # Log total processing time
        end_time = time.time()
        total_time_taken = end_time - start_time
        logger.info(f"Total processing time: {total_time_taken:.2f} seconds")

        # Convert analysis to JSON
        analysis_json = json.dumps(analysis)

        return transcript, analysis_json

    def _calculate_emotion_statistics(self, all_results):
        """Calculate comprehensive emotion statistics from frame results.

        Args:
            all_results: List of per-frame dicts; each may contain "faces"
                (list) and optionally "main_emotion" ({"emotion", "confidence"}).

        Returns:
            Dict with frames_with_faces, face_detection_percentage,
            emotion_counts, emotion_percentages, average_confidence, and
            per-emotion average confidence (confidence_by_emotion).
        """
        # Count frames with faces
        frames_with_faces = 0
        total_faces = 0
        total_confidence = 0

        emotion_counts = {
            "angry": 0,
            "disgust": 0,
            "fear": 0,
            "happy": 0,
            "sad": 0,
            "surprise": 0,
            "neutral": 0
        }

        confidence_by_emotion = {emotion: [] for emotion in emotion_counts.keys()}

        # Process each frame result
        for result in all_results:
            faces = result.get("faces", [])
            if faces:
                frames_with_faces += 1
                total_faces += len(faces)

                # Prefer the precomputed main emotion when present
                if "main_emotion" in result:
                    main_emotion = result["main_emotion"]["emotion"]
                    confidence = result["main_emotion"]["confidence"]

                    if main_emotion in emotion_counts:
                        emotion_counts[main_emotion] += 1
                        confidence_by_emotion[main_emotion].append(confidence)
                        total_confidence += confidence
                # Otherwise check each face for emotions
                else:
                    for face in faces:
                        if "emotion" in face:
                            # Find dominant emotion for this face
                            dominant_emotion = max(face["emotion"].items(), key=lambda x: x[1])
                            emotion_name = dominant_emotion[0]
                            confidence = dominant_emotion[1]

                            if emotion_name in emotion_counts:
                                emotion_counts[emotion_name] += 1
                                confidence_by_emotion[emotion_name].append(confidence)
                                total_confidence += confidence

        # Calculate percentages
        total_emotions = sum(emotion_counts.values())
        emotion_percentages = {}
        if total_emotions > 0:
            for emotion, count in emotion_counts.items():
                emotion_percentages[emotion] = (count / total_emotions) * 100

        # Calculate face detection percentage
        face_detection_percentage = 0
        if all_results:
            face_detection_percentage = (frames_with_faces / len(all_results)) * 100

        # Calculate average confidence
        average_confidence = 0
        if total_emotions > 0:
            average_confidence = total_confidence / total_emotions

        # Calculate average confidence by emotion
        confidence_averages = {}
        for emotion, confidences in confidence_by_emotion.items():
            if confidences:
                confidence_averages[emotion] = sum(confidences) / len(confidences)
            else:
                confidence_averages[emotion] = 0

        # Create emotion statistics
        emotion_stats = {
            "frames_with_faces": frames_with_faces,
            "face_detection_percentage": face_detection_percentage,
            "emotion_counts": emotion_counts,
            "emotion_percentages": emotion_percentages,
            "average_confidence": average_confidence,
            "confidence_by_emotion": confidence_averages
        }

        return emotion_stats

    def _process_emotion_results(self, all_results):
        """Process emotion results to ensure they have required fields.

        Mutates each frame dict in place so that every face / main_face with
        raw "emotion" scores also carries "dominant_emotion" and
        "emotion_confidence", and that "main_emotion" has a "confidence".
        Falsy (empty) results are dropped.

        Returns:
            List of the non-empty, normalized frame dicts.
        """
        processed_results = []

        # Process all results
        for result in all_results:
            # Skip empty results
            if not result:
                continue

            # Process faces to ensure they have dominant_emotion and emotion_confidence
            if 'faces' in result and result['faces']:
                for face in result['faces']:
                    # If face has emotion data but no dominant_emotion, calculate it
                    if 'emotion' in face and 'dominant_emotion' not in face:
                        emotions = face['emotion']
                        if emotions:
                            # Find dominant emotion and its confidence
                            dominant_emotion, confidence = max(emotions.items(), key=lambda x: x[1])
                            face['dominant_emotion'] = dominant_emotion
                            face['emotion_confidence'] = confidence
                            face['emotion_stable'] = face.get('emotion_stable', False)

            # Process main_face if it exists
            if 'main_face' in result and result['main_face']:
                main_face = result['main_face']
                if 'emotion' in main_face and 'dominant_emotion' not in main_face:
                    emotions = main_face['emotion']
                    if emotions:
                        # Find dominant emotion and its confidence
                        dominant_emotion, confidence = max(emotions.items(), key=lambda x: x[1])
                        main_face['dominant_emotion'] = dominant_emotion
                        main_face['emotion_confidence'] = confidence
                        main_face['emotion_stable'] = main_face.get('emotion_stable', False)

            # Process main_emotion if it exists
            if 'main_emotion' in result and result['main_emotion']:
                main_emotion = result['main_emotion']
                # If main_emotion has emotion but not confidence, add it
                if 'emotion' in main_emotion and 'confidence' not in main_emotion:
                    # Try to get confidence from main_face
                    if 'main_face' in result and result['main_face'] and 'emotion' in result['main_face']:
                        emotion_name = main_emotion['emotion']
                        main_emotion['confidence'] = result['main_face']['emotion'].get(emotion_name, 0)

            processed_results.append(result)

        return processed_results

# Create a singleton instance
# Module-level shared instance; constructing it eagerly initializes the
# speech, emotion, and AI-analysis sub-services at import time.
video_processor = VideoProcessor()

# Function to maintain backward compatibility
def process_video(
    video_path: str,
    frame_rate: int = 1,
    backend: str = 'mediapipe',
    language: str = 'en',
    generate_annotated_video: bool = False,
    video_id: Optional[str] = None,
    status_callback = None,
    min_face_confidence: float = 0.5,
    min_face_size_ratio: float = 0.05,
    save_emotion_stats: bool = True,
    skip_frames: int = 2,  # Default parameter, not used for frame sampling anymore
    adaptive_sampling: bool = False,  # Control whether adaptive sampling is used
    analyze_eye_contact: bool = True,
    analyze_body_language: bool = True,
    analyze_face: bool = True,
    job_title: str = "Professional",
    model_name: str = "gpt-4o"
) -> Tuple[str, str]:
    """
    Process a video file for emotion analysis (backward compatibility function).
    
    Args:
        video_path: Path to the video file
        frame_rate: Process every nth frame (controls the sampling rate of frames for analysis)
        backend: Backend to use for face detection
        language: Language of the video
        generate_annotated_video: Whether to generate an annotated video
        video_id: ID of the video (optional)
        status_callback: Callback function for progress updates
        min_face_confidence: Minimum confidence for face detection
        min_face_size_ratio: Minimum face size as ratio of image dimensions
        save_emotion_stats: Whether to save detailed emotion statistics as JSON
        skip_frames: Legacy parameter, kept for backward compatibility but not used
        adaptive_sampling: Whether to use adaptive sampling
        analyze_eye_contact: Whether to analyze eye contact
        analyze_body_language: Whether to analyze body language
        analyze_face: Whether to analyze face
        job_title: Job title for face analysis
        model_name: The name of the model to use for AI analysis
        
    Returns:
        Tuple of (transcript, analysis_json)
    """
    return video_processor.process_video(
        video_path=video_path,
        frame_rate=frame_rate,
        backend=backend,
        language=language,
        generate_annotated_video=generate_annotated_video,
        video_id=video_id,
        status_callback=status_callback,
        min_face_confidence=min_face_confidence,
        min_face_size_ratio=min_face_size_ratio,
        save_emotion_stats=save_emotion_stats,
        skip_frames=skip_frames,
        adaptive_sampling=adaptive_sampling,
        analyze_eye_contact=analyze_eye_contact,
        analyze_body_language=analyze_body_language,
        analyze_face=analyze_face,
        job_title=job_title,
        model_name=model_name
    )