# test/behavior_backend/tests/services/test_emotions_gpu.py
# (from commit 8ae78b0 by hibatorrahmen: "Add backend application and Dockerfile")
import os
import cv2
import json
import time
import pytest
import numpy as np
import torch
from pathlib import Path
from datetime import datetime
# Import the emotion analyzer functions
import sys
sys.path.append(str(Path(__file__).resolve().parents[2])) # Add behavior_backend to path
from app.services.processing.emotion_analyzer import process_video_frames, EmotionCNN, load_emotion_model
# Small filesystem helper used by the fixtures below.
def ensure_dir(directory):
    """Create *directory* (including missing parents) if it doesn't exist."""
    Path(directory).mkdir(parents=True, exist_ok=True)
class TestEmotionGPU:
    """Tests for GPU-based emotion analysis.

    Covers three layers: raw CUDA availability/compute, moving the emotion
    CNN to the GPU for a forward pass, and end-to-end processing of video
    frames with GPU acceleration.
    """

    @pytest.fixture
    def test_video_path(self):
        """Return a path to a test video, synthesizing one if none is found.

        Searches a few conventional locations for a sample clip; when none
        exists, writes a 3-second dummy video containing a crude face-like
        shape so the face detector has something to work with.
        """
        base_dir = Path(__file__).resolve().parents[2]  # Go up to behavior_backend directory
        # Try to find a sample video from common locations
        potential_videos = [
            base_dir / "videos" / "sample.mp4",
            base_dir / "videos" / "test.mp4",
            base_dir / "static" / "samples" / "sample.mp4",
            base_dir / "tests" / "data" / "sample.mp4",
        ]
        for video_path in potential_videos:
            if video_path.exists():
                return str(video_path)

        # Fallback - create a dummy video file if no sample videos exist
        fallback_path = base_dir / "tests" / "output" / "dummy_test_video.mp4"
        ensure_dir(fallback_path.parent)
        if not fallback_path.exists():
            # Create a small dummy video
            width, height = 640, 480
            fps = 30
            duration = 3  # seconds
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(str(fallback_path), fourcc, fps, (width, height))
            try:
                # Generate frames with a face-like shape for the detector
                for i in range(fps * duration):
                    frame = np.zeros((height, width, 3), dtype=np.uint8)
                    cv2.circle(frame, (width//2, height//2), 100, (200, 200, 200), -1)  # Face
                    cv2.circle(frame, (width//2-30, height//2-30), 15, (0, 0, 0), -1)  # Left eye
                    cv2.circle(frame, (width//2+30, height//2-30), 15, (0, 0, 0), -1)  # Right eye
                    cv2.ellipse(frame, (width//2, height//2+30), (50, 20), 0, 0, 180, (0, 0, 0), -1)  # Mouth
                    out.write(frame)
            finally:
                # Always release the writer so a partially written file is
                # still closed even if a write above raises.
                out.release()
        return str(fallback_path)

    @pytest.fixture
    def output_dir(self):
        """Return (and create) the directory where test artifacts are written."""
        base_dir = Path(__file__).resolve().parents[2]  # Go up to behavior_backend directory
        output_path = str(base_dir / "tests" / "output" / "emotions_gpu")
        ensure_dir(output_path)
        return output_path

    def test_gpu_detection(self):
        """Verify CUDA is detected and a trivial GPU computation is correct."""
        print(f"CUDA available: {torch.cuda.is_available()}")
        if torch.cuda.is_available():
            print(f"CUDA device count: {torch.cuda.device_count()}")
            print(f"CUDA device name: {torch.cuda.get_device_name(0)}")
            # Test CUDA functionality. Note: a failing computation must fail
            # the test — re-raise after logging instead of swallowing it.
            try:
                x = torch.tensor([1.0, 2.0, 3.0], device='cuda')
                y = x * 2
                result = y.cpu().numpy()
                assert np.array_equal(result, np.array([2.0, 4.0, 6.0])), "CUDA computation failed"
                print("CUDA test passed!")
            except Exception as e:
                print(f"CUDA test failed: {e}")
                raise

    def test_model_to_gpu(self):
        """Verify the emotion model runs a forward pass on the GPU."""
        if not torch.cuda.is_available():
            pytest.skip("CUDA not available, skipping GPU model test")
        try:
            # Load emotion model and move it to the GPU
            model = load_emotion_model()
            model.to('cuda')
            # 1x1x48x48 input — presumably a grayscale 48x48 face crop;
            # confirm against the model definition if it changes.
            sample_input = torch.rand(1, 1, 48, 48, device='cuda')
            with torch.no_grad():
                output = model(sample_input)
            # Check output shape (should be [1, 7] for 7 emotions)
            assert output.shape == (1, 7), f"Expected output shape (1, 7), got {output.shape}"
            # Softmax over the logits should yield a probability distribution
            probs = torch.nn.functional.softmax(output, dim=1)
            assert torch.abs(torch.sum(probs) - 1.0) < 1e-5, "Probabilities don't sum to 1"
            print("Model GPU test passed!")
        except Exception as e:
            print(f"Model GPU test failed: {e}")
            # Chain the original exception instead of `assert False` so the
            # real traceback is preserved in the failure report.
            raise AssertionError(f"Failed to run model on GPU: {e}") from e

    def test_process_video_frames_gpu(self, test_video_path, output_dir):
        """Process a video end-to-end and sanity-check the emotion results."""
        # Define output paths
        results_path = os.path.join(output_dir, f"gpu_emotion_results_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json")
        # Process the video with GPU if available
        use_gpu = torch.cuda.is_available()
        if not use_gpu:
            print("CUDA not available, test will run on CPU")
        # Process video frames (timed for informational logging)
        start_time = time.time()
        results = process_video_frames(
            test_video_path,
            sample_rate=5,  # Process every 5th frame for speed
            use_gpu=use_gpu
        )
        end_time = time.time()
        # Save results to JSON for offline inspection
        with open(results_path, 'w') as f:
            json.dump(results, f, indent=2)
        # Log processing time
        processing_time = end_time - start_time
        print(f"Processing time: {processing_time:.2f} seconds")
        print(f"Saved results to {results_path}")
        # Verify results
        assert isinstance(results, list), "Results should be a list"
        # Calculate face-detection rate (guard against empty result lists)
        frames_with_faces = sum(1 for frame in results if frame.get('faces'))
        total_frames = len(results)
        detection_rate = (frames_with_faces / total_frames * 100) if total_frames > 0 else 0
        print(f"Frames processed: {total_frames}")
        print(f"Frames with faces: {frames_with_faces} ({detection_rate:.1f}%)")
        # Check if GPU was used when requested and available
        if use_gpu:
            gpu_frames = sum(1 for frame in results if frame.get('gpu_used', False))
            gpu_usage_rate = (gpu_frames / total_frames * 100) if total_frames > 0 else 0
            print(f"Frames processed with GPU: {gpu_frames} ({gpu_usage_rate:.1f}%)")
            # If GPU was requested and available, it should be used
            assert gpu_frames > 0, "GPU was available but not used for any frames"
        # Check for emotion data and collect values for range validation.
        # NOTE: this counts one per face, not per frame, when a frame has
        # multiple faces — kept as-is to preserve existing semantics.
        frames_with_emotions = 0
        emotion_values = []
        for frame in results:
            for face in frame.get('faces', []):
                if 'emotions' in face:
                    frames_with_emotions += 1
                    # Collect emotion values for verification
                    for emotion, value in face['emotions'].items():
                        emotion_values.append(value)
        if frames_with_faces > 0:
            assert frames_with_emotions > 0, "No frames with emotion data found"
            assert len(emotion_values) > 0, "No emotion values recorded"
            # Check that emotion values are valid (between 0 and 1)
            for value in emotion_values:
                assert 0 <= value <= 1, f"Emotion value {value} outside valid range [0, 1]"
        print(f"Test completed successfully - processed {total_frames} frames with {frames_with_faces} faces detected")
if __name__ == "__main__":
    # The previous entry point called fixture-decorated methods directly
    # (e.g. `test.test_video_path()`), which pytest has rejected at runtime
    # since 4.x ("Fixtures are not meant to be called directly"). Delegate
    # to pytest so fixtures are resolved properly and all tests run.
    raise SystemExit(pytest.main([__file__, "-v", "-s"]))