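"""Emotion-based movie recommender for Hugging Face Spaces.

Captures a webcam frame in Gradio, detects the user's facial emotion with
DeepFace (plus OpenCV preprocessing, ensemble detector backends, and temporal
smoothing), maps the emotion to TMDB genres, and optionally rewrites movie
descriptions with LangChain + OpenAI.
"""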
import gradio as gr
import numpy as np
from deepface import DeepFace
import requests
import os
import random
import cv2
from langchain_openai import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from gradio.themes.utils import fonts
from collections import deque


class ImprovedEmotionDetector:
    def __init__(self):
        # Initialize emotion history for temporal smoothing
        self.emotion_history = deque(maxlen=5)  # Store last 5 predictions
        self.confidence_threshold = 0.6
        # Load multiple face detection models for better accuracy
        self.face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        self.profile_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_profileface.xml')
        # Emotion mapping for consistent results
        self.emotion_aliases = {
            'angry': 'angry',
            'disgust': 'disgust',
            'fear': 'fear',
            'happy': 'happy',
            'sad': 'sad',
            'surprise': 'surprise',
            'neutral': 'neutral'
        }
    def preprocess_image(self, image):
        """Enhanced image preprocessing for better emotion detection."""
        # Convert PIL to numpy first, outside the try block, so img_array is
        # always defined when the exception handler returns it
        if hasattr(image, 'convert'):
            image = image.convert('RGB')
        img_array = np.array(image)
        try:
            # 1. Normalize image size - DeepFace works better with specific sizes
            height, width = img_array.shape[:2]
            if width > 640:  # Resize if too large
                scale = 640 / width
                new_width = int(width * scale)
                new_height = int(height * scale)
                img_array = cv2.resize(img_array, (new_width, new_height))
            # 2. Enhance image quality: convert to LAB color space so lighting
            # can be normalized on the luminance channel alone
            lab = cv2.cvtColor(img_array, cv2.COLOR_RGB2LAB)
            l, a, b = cv2.split(lab)
            # Apply CLAHE (Contrast Limited Adaptive Histogram Equalization) to the L channel
            clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
            l = clahe.apply(l)
            # Merge channels and convert back to RGB
            enhanced = cv2.merge([l, a, b])
            img_array = cv2.cvtColor(enhanced, cv2.COLOR_LAB2RGB)
            # 3. Noise reduction (edge-preserving)
            img_array = cv2.bilateralFilter(img_array, 9, 75, 75)
            return img_array
        except Exception as e:
            print(f"Preprocessing error: {e}")
            return img_array  # fall back to the unprocessed array
    def detect_faces_multi_method(self, img_array):
        """Use multiple methods for better face detection."""
        gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
        # Method 1: Frontal face detection
        faces_frontal = self.face_cascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        # Method 2: Profile face detection
        faces_profile = self.profile_cascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30)
        )
        # Combine detections
        all_faces = []
        if len(faces_frontal) > 0:
            all_faces.extend(faces_frontal)
        if len(faces_profile) > 0:
            all_faces.extend(faces_profile)
        return all_faces
    def crop_face_region(self, img_array, faces):
        """Crop and enhance the face region for better emotion detection."""
        if len(faces) == 0:
            return img_array
        # Get the largest face (assuming it's the main subject)
        largest_face = max(faces, key=lambda face: face[2] * face[3])
        x, y, w, h = largest_face
        # Add padding around the face (20% of the smaller dimension on each side)
        padding = int(0.2 * min(w, h))
        x_start = max(0, x - padding)
        y_start = max(0, y - padding)
        x_end = min(img_array.shape[1], x + w + padding)
        y_end = min(img_array.shape[0], y + h + padding)
        # Crop the face region
        face_roi = img_array[y_start:y_end, x_start:x_end]
        # Resize to an optimal size for emotion detection (224x224 works well);
        # fall back to the full frame if the crop came out empty
        if face_roi.size == 0:
            return img_array
        return cv2.resize(face_roi, (224, 224))
    def ensemble_emotion_detection(self, img_array):
        """Use an ensemble approach with multiple DeepFace detector backends."""
        results = []
        backends = ['opencv', 'retinaface', 'mtcnn']  # Different detection backends
        for backend in backends:
            try:
                result = DeepFace.analyze(
                    img_array,
                    actions=['emotion'],
                    enforce_detection=False,
                    detector_backend=backend
                )
                emotions = result[0]['emotion'] if isinstance(result, list) else result['emotion']
                results.append(emotions)
            except Exception as e:
                print(f"Backend {backend} failed: {e}")
                continue
        # If no backend worked, use the default backend
        if not results:
            result = DeepFace.analyze(img_array, actions=['emotion'], enforce_detection=False)
            emotions = result[0]['emotion'] if isinstance(result, list) else result['emotion']
            return emotions
        # Average the results from the different backends
        averaged_emotions = {}
        for emotion in results[0].keys():
            averaged_emotions[emotion] = np.mean([r[emotion] for r in results])
        return averaged_emotions
    def temporal_smoothing(self, current_emotions):
        """Apply temporal smoothing using the emotion history."""
        self.emotion_history.append(current_emotions)
        if len(self.emotion_history) < 2:
            return current_emotions
        # Calculate a weighted average (more weight to recent emotions)
        weights = np.linspace(0.5, 1.0, len(self.emotion_history))
        weights = weights / weights.sum()
        smoothed_emotions = {}
        for emotion in current_emotions.keys():
            values = [hist[emotion] for hist in self.emotion_history]
            smoothed_emotions[emotion] = np.average(values, weights=weights)
        return smoothed_emotions
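
    # Worked example of the smoothing weights: with three frames in history,
    # np.linspace(0.5, 1.0, 3) gives [0.5, 0.75, 1.0], which normalizes to
    # roughly [0.22, 0.33, 0.44] - so the newest frame counts about twice as
    # much as the oldest one.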
    def confidence_based_filtering(self, emotions):
        """Filter results based on the confidence threshold."""
        max_emotion = max(emotions, key=emotions.get)
        max_confidence = emotions[max_emotion] / 100.0  # Convert percentage to decimal
        # If confidence is too low, return a neutral-dominated distribution
        if max_confidence < self.confidence_threshold:
            return {
                'neutral': 60.0,
                'happy': 10.0,
                'sad': 10.0,
                'angry': 5.0,
                'fear': 5.0,
                'surprise': 5.0,
                'disgust': 5.0
            }
        return emotions
    def detect_emotion_from_webcam_improved(self, image):
        """Improved emotion detection with all enhancements."""
        try:
            # Step 1: Preprocess image
            img_array = self.preprocess_image(image)
            # Step 2: Detect faces with multiple methods
            faces = self.detect_faces_multi_method(img_array)
            # Step 3: Crop and enhance face region if detected
            if len(faces) > 0:
                face_img = self.crop_face_region(img_array, faces)
            else:
                face_img = img_array
            # Step 4: Ensemble emotion detection
            try:
                emotions = self.ensemble_emotion_detection(face_img)
            except Exception as e:
                # Fallback to standard detection
                print(f"Ensemble detection failed, falling back: {e}")
                result = DeepFace.analyze(img_array, actions=['emotion'], enforce_detection=False)
                emotions = result[0]['emotion'] if isinstance(result, list) else result['emotion']
            # Step 5: Apply confidence filtering
            emotions = self.confidence_based_filtering(emotions)
            # Step 6: Apply temporal smoothing
            smoothed_emotions = self.temporal_smoothing(emotions)
            # Step 7: Get final results
            dominant_emotion = max(smoothed_emotions, key=smoothed_emotions.get)
            confidence = smoothed_emotions[dominant_emotion] / 100.0
            # Normalize emotions to sum to 1
            total = sum(smoothed_emotions.values())
            normalized_emotions = {k: v / total for k, v in smoothed_emotions.items()}
            return dominant_emotion, confidence, normalized_emotions
        except Exception as e:
            print(f"Improved emotion detection error: {str(e)}")
            # Enhanced fallback with more realistic confidence
            return "neutral", 0.65, {
                "neutral": 0.65,
                "happy": 0.15,
                "sad": 0.08,
                "angry": 0.04,
                "fear": 0.03,
                "surprise": 0.03,
                "disgust": 0.02
            }
    def analyze_image_quality(self, img_array):
        """Analyze image quality and provide feedback."""
        gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
        # Check brightness: mean pixel intensity (0-255)
        brightness = np.mean(gray)
        # Check contrast: RMS contrast (standard deviation of intensities);
        # the threshold below is heuristic
        contrast = float(np.std(gray))
        # Check blur using the variance of the Laplacian (low values = blurry)
        blur_score = cv2.Laplacian(gray, cv2.CV_64F).var()
        quality_issues = []
        if brightness < 50:
            quality_issues.append("Image too dark - please improve lighting")
        elif brightness > 200:
            quality_issues.append("Image too bright - reduce lighting")
        if contrast < 30:
            quality_issues.append("Low contrast - ensure good lighting difference")
        if blur_score < 100:
            quality_issues.append("Image appears blurry - hold camera steady")
        return quality_issues
    def get_emotion_detection_tips(self):
        """Return tips for better emotion detection."""
        return [
            "💡 Ensure good, even lighting on your face",
            "📍 Position your face directly facing the camera",
            "😊 Make clear facial expressions",
            "🔍 Get closer to the camera (but not too close)",
            "⚡ Hold still for a moment during capture",
            "🙈 Avoid covering parts of your face",
            "🌞 Use natural lighting when possible",
            "📱 Clean your camera lens for clarity"
        ]
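
# Illustrative usage (a minimal sketch, not part of the app flow): the
# detector can be exercised on a single saved frame. The file name
# "frame.jpg" below is hypothetical.
#
#   detector = ImprovedEmotionDetector()
#   frame = cv2.cvtColor(cv2.imread("frame.jpg"), cv2.COLOR_BGR2RGB)
#   emotion, confidence, scores = detector.detect_emotion_from_webcam_improved(frame)
#   print(emotion, confidence, scores)
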

class EmotionMovieBot:
    def __init__(self):
        self.tmdb_api_key = os.getenv("TMDB_API_KEY")
        if not self.tmdb_api_key:
            raise ValueError("TMDB_API_KEY environment variable is required")
        self.tmdb_base_url = "https://api.themoviedb.org/3"
        # Initialize the improved emotion detector
        self.emotion_detector = ImprovedEmotionDetector()
        # Initialize LangChain components
        openai_api_key = os.getenv("OPENAI_API_KEY")
        if not openai_api_key:
            print("Warning: OPENAI_API_KEY not found. LangChain features will be disabled.")
            self.llm_enabled = False
        else:
            self.llm_enabled = True
            self.llm = OpenAI(
                temperature=0.7,
                max_tokens=200,
                openai_api_key=openai_api_key
            )
            # Enhanced description prompt
            self.description_prompt = PromptTemplate(
                input_variables=["title", "genre_names", "original_plot", "emotion", "rating"],
                template="""
You are a movie enthusiast writing for someone who is currently feeling {emotion}.

Movie: {title}
Genres: {genre_names}
Rating: {rating}/10
Original Description: {original_plot}

Rewrite this movie description in 2-3 sentences to appeal specifically to someone feeling {emotion}.
Make it emotionally engaging and explain why this movie would be perfect for their current mood.
Focus on the emotional journey and experience they'll have watching it.

Enhanced Description:"""
            )
            # Movie reasoning prompt
            self.reasoning_prompt = PromptTemplate(
                input_variables=["title", "emotion", "confidence"],
                template="""
Explain in 1-2 sentences why "{title}" is specifically recommended for someone feeling {emotion}
with {confidence}% confidence. Focus on the therapeutic or mood-enhancing benefits.

Reasoning:"""
            )
            self.description_chain = LLMChain(llm=self.llm, prompt=self.description_prompt)
            self.reasoning_chain = LLMChain(llm=self.llm, prompt=self.reasoning_prompt)
        # Emotion-to-genre mapping based on psychological research
        self.emotion_genre_map = {
            "happy": [35, 10749, 16],   # Comedy, Romance, Animation
            "sad": [18, 10402],         # Drama, Music
            "angry": [28, 53],          # Action, Thriller
            "fear": [27, 9648],         # Horror, Mystery
            "surprise": [878, 14],      # Sci-Fi, Fantasy
            "disgust": [80, 9648],      # Crime, Mystery
            "neutral": [35, 28, 18]     # Comedy, Action, Drama
        }
        # Genre ID to name mapping for LangChain context
        self.genre_map = {
            28: "Action", 12: "Adventure", 16: "Animation", 35: "Comedy", 80: "Crime",
            99: "Documentary", 18: "Drama", 10751: "Family", 14: "Fantasy", 36: "History",
            27: "Horror", 10402: "Music", 9648: "Mystery", 10749: "Romance", 878: "Science Fiction",
            10770: "TV Movie", 53: "Thriller", 10752: "War", 37: "Western"
        }
        self.emotion_context = {
            "happy": "You seem to be in a great mood! Perfect time for some uplifting entertainment.",
            "sad": "I notice you might be feeling a bit down. Here are some movies that might help you process emotions or lift your spirits.",
            "angry": "Feeling intense? These high-energy films might be just what you need.",
            "fear": "In the mood for some thrills? These carefully selected films will give you the right amount of suspense.",
            "surprise": "You look intrigued! These mind-bending films will keep you guessing.",
            "disgust": "Looking for something different? These compelling stories might change your perspective.",
            "neutral": "Ready for some great cinema? Here are some universally acclaimed films."
        }
    def detect_emotion_from_webcam(self, image):
        """Use the improved emotion detection system."""
        try:
            # Use the improved detector
            dominant_emotion, confidence, all_emotions = self.emotion_detector.detect_emotion_from_webcam_improved(image)
            # Convert to percentage format for compatibility
            emotion_percentages = {k: v * 100 for k, v in all_emotions.items()}
            return dominant_emotion, confidence * 100, emotion_percentages
        except Exception as e:
            print(f"Emotion detection error: {str(e)}")
            # Return a neutral emotion as fallback
            return "neutral", 65.0, {"neutral": 65.0, "happy": 15.0, "sad": 8.0, "angry": 4.0, "fear": 3.0, "surprise": 3.0, "disgust": 2.0}
    def get_movie_recommendations_from_tmdb(self, emotion):
        """Get movie recommendations based on emotion using the TMDB API."""
        try:
            genre_ids = self.emotion_genre_map.get(emotion.lower(), [35, 28, 18])
            all_movies = []
            pages_to_fetch = 5  # 5 pages x 20 results = up to 100 movies
            for page in range(1, pages_to_fetch + 1):
                params = {
                    "api_key": self.tmdb_api_key,
                    "with_genres": ",".join(map(str, genre_ids)),
                    "sort_by": "popularity.desc",
                    "vote_average.gte": 6.5,
                    "vote_count.gte": 100,
                    "include_adult": "false",
                    "page": page
                }
                response = requests.get(f"{self.tmdb_base_url}/discover/movie", params=params, timeout=10)
                response.raise_for_status()
                movies = response.json().get('results', [])
                all_movies.extend(movies)
            # Sample after all pages have been fetched, not inside the loop
            if len(all_movies) >= 5:
                return random.sample(all_movies, 5)
            return all_movies  # return fewer if less than 5 found
        except requests.exceptions.RequestException as e:
            print(f"TMDB API error: {str(e)}")
            return self._get_fallback_movies(emotion)
        except Exception as e:
            print(f"Unexpected error fetching movies: {str(e)}")
            return self._get_fallback_movies(emotion)
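
    # For reference, each item returned by /discover/movie looks roughly like
    # this (fields abridged to the ones this app reads; see the TMDB docs for
    # the full schema):
    #   {"title": "...", "overview": "...", "vote_average": 7.8,
    #    "release_date": "2015-05-15", "genre_ids": [28, 53],
    #    "poster_path": "/abc.jpg"}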
    def _get_fallback_movies(self, emotion):
        """Fallback movie recommendations when the API fails."""
        fallback_movies = {
            "happy": [
                {"title": "The Pursuit of Happyness", "overview": "A heartwarming story of determination and hope.", "vote_average": 8.0, "release_date": "2006-12-15"},
                {"title": "Finding Nemo", "overview": "An adventurous journey of a father fish searching for his son.", "vote_average": 8.2, "release_date": "2003-05-30"}
            ],
            "sad": [
                {"title": "Inside Out", "overview": "A beautiful exploration of emotions and growing up.", "vote_average": 8.1, "release_date": "2015-06-19"},
                {"title": "The Shawshank Redemption", "overview": "A story of hope and friendship in the darkest places.", "vote_average": 9.3, "release_date": "1994-09-23"}
            ],
            "angry": [
                {"title": "Mad Max: Fury Road", "overview": "High-octane action in a post-apocalyptic wasteland.", "vote_average": 8.1, "release_date": "2015-05-15"},
                {"title": "John Wick", "overview": "Stylized action and revenge story.", "vote_average": 7.4, "release_date": "2014-10-24"}
            ],
            "neutral": [
                {"title": "Inception", "overview": "A mind-bending thriller about dreams within dreams.", "vote_average": 8.8, "release_date": "2010-07-16"},
                {"title": "The Dark Knight", "overview": "A superhero film that transcends the genre.", "vote_average": 9.0, "release_date": "2008-07-18"}
            ]
        }
        return fallback_movies.get(emotion.lower(), fallback_movies["neutral"])
    def enhance_movie_description_with_langchain(self, movie, emotion, confidence):
        """Use LangChain to create enhanced, emotion-specific movie descriptions."""
        if not self.llm_enabled:
            return self._create_basic_description(movie, emotion, confidence)
        try:
            title = movie.get('title', 'Unknown Title')
            original_plot = movie.get('overview', 'No description available.')
            rating = movie.get('vote_average', 0)
            genre_ids = movie.get('genre_ids', [])
            genre_names = ', '.join([self.genre_map.get(gid, 'Unknown') for gid in genre_ids])
            # Generate enhanced description
            enhanced_description = self.description_chain.run(
                title=title,
                genre_names=genre_names,
                original_plot=original_plot,
                emotion=emotion,
                rating=rating
            ).strip()
            # Generate reasoning
            reasoning = self.reasoning_chain.run(
                title=title,
                emotion=emotion,
                confidence=int(confidence)
            ).strip()
            return enhanced_description, reasoning
        except Exception as e:
            print(f"LangChain enhancement failed: {str(e)}")
            return self._create_basic_description(movie, emotion, confidence)
    def _create_basic_description(self, movie, emotion, confidence):
        """Fallback description when LangChain is not available."""
        title = movie.get('title', 'Unknown Title')
        original_plot = movie.get('overview', 'No description available.')
        genre_ids = movie.get('genre_ids', [])
        rating = movie.get('vote_average', 0)
        # Get genre names for this specific movie
        movie_genres = [self.genre_map.get(gid, 'Unknown') for gid in genre_ids[:3]]
        genre_text = ', '.join(movie_genres) if movie_genres else 'Various genres'
        emotion_enhancers = {
            "happy": "This uplifting film will complement your positive mood perfectly.",
            "sad": "This emotionally rich story might provide comfort and catharsis.",
            "angry": "This intense film can help channel your energy productively.",
            "fear": "This thrilling experience will give you controlled excitement.",
            "surprise": "This unpredictable story will keep you engaged and intrigued.",
            "disgust": "This compelling narrative might offer a fresh perspective.",
            "neutral": "This well-crafted film offers excellent entertainment value."
        }
        enhanced_description = f"{original_plot} {emotion_enhancers.get(emotion, emotion_enhancers['neutral'])}"
        # Create unique reasoning for each movie
        reasoning_templates = [
            f"'{title}' is perfect for your {emotion} mood because its {genre_text.lower()} elements (rated {rating}/10) create the ideal emotional experience.",
            f"With a {rating}/10 rating, '{title}' offers {genre_text.lower()} storytelling that resonates perfectly with your {emotion} emotional state.",
            f"The {genre_text.lower()} nature of '{title}' (rated {rating}/10) makes it an excellent choice for someone feeling {emotion}.",
            f"'{title}' combines {genre_text.lower()} with emotional depth (rated {rating}/10), making it ideal for your current {emotion} mood.",
            f"Based on your {emotion} emotion, '{title}' offers the perfect {genre_text.lower()} experience with its {rating}/10 rating."
        ]
        # Use the movie title hash to select consistent but different reasoning per movie
        reasoning_index = hash(title) % len(reasoning_templates)
        reasoning = reasoning_templates[reasoning_index]
        return enhanced_description, reasoning
    def generate_personalized_recommendations(self, emotion, movies, emotion_confidence):
        """Generate the personalized recommendation display."""
        context = self.emotion_context.get(emotion.lower(), self.emotion_context["neutral"])
        recommendation_text = f"""🎬 **Enhanced Real-Time Emotion-Based Movie Recommendations: {emotion.title()}**

{context}

**Emotion Detection Confidence:** {emotion_confidence:.1f}%
**Detection Method:** Improved DeepFace + OpenCV + Temporal Smoothing + Ensemble Analysis

**My Top Picks for You:**
"""
        selected_movies = random.sample(movies, min(5, len(movies)))
        for i, movie in enumerate(selected_movies, 1):
            title = movie.get('title', 'Unknown Title')
            rating = movie.get('vote_average', 0)
            poster_path = movie.get('poster_path', '')
            release_date = movie.get('release_date', 'Unknown')
            # Get enhanced description and reasoning
            enhanced_description, reasoning = self.enhance_movie_description_with_langchain(movie, emotion, emotion_confidence)
            poster_url = f"https://image.tmdb.org/t/p/w500{poster_path}" if poster_path else ""
            year = release_date.split('-')[0] if release_date and release_date != 'Unknown' else 'Unknown'
            recommendation_text += f"""
<details style="margin: 15px 0; border: 2px solid #404040; border-radius: 12px; padding: 0; background: linear-gradient(135deg, #2a2a2a, #1f1f1f); box-shadow: 0 4px 12px rgba(0,0,0,0.3);">
<summary style="cursor: pointer; font-size: 18px; font-weight: bold; padding: 15px 20px; background: linear-gradient(135deg, #4a90e2, #357abd); color: white; border-radius: 10px; margin: 0;">
🎬 {i}. {title} ({year}) ⭐ {rating}/10
</summary>
<div style="padding: 20px;">
"""
            if poster_url:
                recommendation_text += f"""
<div style="text-align: center; margin: 20px 0;">
<img src="{poster_url}" alt="{title} Poster" style="width: 200px; height: 300px; object-fit: cover; border-radius: 12px; box-shadow: 0 8px 20px rgba(0,0,0,0.5);">
</div>
"""
            recommendation_text += f"""
<div style="background: linear-gradient(135deg, #333333, #2a2a2a); padding: 20px; border-radius: 10px; border-left: 4px solid #4a90e2;">
<h4 style="color: #4a90e2; margin-top: 0;">🤖 {"AI-Enhanced" if self.llm_enabled else "Emotion-Matched"} Description:</h4>
<p style="line-height: 1.7; color: #e0e0e0; margin: 0 0 15px 0; font-style: italic; background: rgba(74, 144, 226, 0.1); padding: 10px; border-radius: 5px;">{enhanced_description}</p>
<h4 style="color: #4a90e2; margin-top: 15px;">💡 Perfect for Your Current Mood:</h4>
<p style="line-height: 1.6; color: #c0c0c0; margin: 0;">{reasoning}</p>
</div>
</div>
</details>
"""
        recommendation_text += f"""
---
<div style="background: linear-gradient(135deg, #2a2a2a, #1f1f1f); padding: 20px; border-radius: 10px; border: 1px solid #404040; margin-top: 20px;">
<p style="color: #e0e0e0; margin: 0 0 10px 0;">🤖 <strong style="color: #4a90e2;">Enhanced Tech Stack:</strong> Improved DeepFace emotion recognition with ensemble detection, OpenCV preprocessing, temporal smoothing, TMDB API{", LangChain + OpenAI for AI enhancement" if self.llm_enabled else ""}</p>
<p style="color: #e0e0e0; margin: 0;">📊 <strong style="color: #4a90e2;">Advanced Emotion Analysis:</strong> Detected {emotion} with {emotion_confidence:.1f}% confidence using enhanced multi-backend detection</p>
</div>
"""
        return recommendation_text
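

# A single cached bot instance. Re-creating EmotionMovieBot on every request
# would reset the detector's emotion-history deque, so temporal smoothing
# would never see more than one frame. This lazy accessor is a small
# restructuring of the original per-call construction.
_BOT = None


def _get_bot():
    global _BOT
    if _BOT is None:
        _BOT = EmotionMovieBot()
    return _BOT
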

def analyze_emotion_and_recommend(image):
    """Main function to process webcam input and generate recommendations."""
    if image is None:
        return "Please capture an image from your webcam first!", "", "❌ No image provided"
    try:
        bot = _get_bot()
        # Detect emotion from webcam input using the improved method
        emotion, raw_confidence, all_emotions = bot.detect_emotion_from_webcam(image)
        # Normalize emotion scores for better confidence calculation
        total_score = sum(all_emotions.values())
        normalized_emotions = {k: v / total_score if total_score > 0 else 0.0 for k, v in all_emotions.items()}
        # normalized_emotions values are already fractions in [0, 1]
        confidence = normalized_emotions.get(emotion, raw_confidence / 100.0)
        # Get image quality feedback
        img_array = np.array(image.convert('RGB')) if hasattr(image, 'convert') else np.array(image)
        quality_issues = bot.emotion_detector.analyze_image_quality(img_array)
        # Get movie recommendations from TMDB
        movies = bot.get_movie_recommendations_from_tmdb(emotion)
        if not movies:
            return "Error fetching movie recommendations", "Unable to get recommendations from TMDB API", "❌ API Error"
        # Generate personalized recommendations
        recommendation = bot.generate_personalized_recommendations(emotion, movies, confidence * 100)
        # Create the enhanced emotion analysis display
        emotion_analysis = f"""**Enhanced Real-Time Emotion Detection Results:**

**Primary Emotion: {emotion.title()}** (Confidence: {confidence * 100:.1f}%)

**Detection Enhancements Applied:**
- ✅ Image preprocessing with CLAHE enhancement
- ✅ Multi-method face detection (frontal + profile)
- ✅ Ensemble detection with multiple backends
- ✅ Temporal smoothing across frames
- ✅ Confidence-based filtering

**Detailed Emotion Analysis:**
"""
        for emo, score in sorted(normalized_emotions.items(), key=lambda x: x[1], reverse=True):
            bar_length = int(score * 20)  # Visual bar representation
            bar = "█" * bar_length + "░" * (20 - bar_length)
            emotion_analysis += f"- {emo.title()}: {score * 100:.1f}% {bar}\n"
        # Surface image-quality feedback (previously computed but never shown)
        if quality_issues:
            emotion_analysis += "\n**Image Quality Tips:**\n"
            emotion_analysis += "\n".join(f"- {issue}" for issue in quality_issues) + "\n"
        status = f"✅ Successfully analyzed emotion and generated recommendations! Detected: {emotion.title()} ({confidence * 100:.1f}%)"
        return emotion_analysis, recommendation, status
    except ValueError as e:
        if "TMDB_API_KEY" in str(e):
            return "Configuration Error", "TMDB API key is not configured. Please set the TMDB_API_KEY environment variable.", "❌ Configuration Error"
        else:
            return "Error", str(e), f"❌ Error: {str(e)}"
    except Exception as e:
        error_msg = f"❌ Unexpected error: {str(e)}"
        print(f"Full error: {e}")
        return "Error in processing", "Unable to analyze emotion or generate recommendations", error_msg


# Create the Gradio interface
def create_gradio_interface():
    """Create and configure the Gradio interface."""
    custom_theme = gr.themes.Soft(
        primary_hue="indigo",
        font=fonts.GoogleFont("Press Start 2P")
    )
    with gr.Blocks(title="🎬 Real-Time Emotion-Based Movie Recommender", theme=custom_theme) as app:
        gr.Markdown("""
        # 🎬 ABSOLUTE CINEMA
        ### *Blume AI - Team 5*

        This system uses **DeepFace** for facial emotion recognition, **OpenCV** for real-time webcam processing,
        **TMDB API** for movie data, and **LangChain** for AI-enhanced descriptions.

        **How it works:**
        1. 📸 Capture your facial expression using webcam
        2. 🧠 AI analyzes your emotion using DeepFace
        3. 🎬 System maps emotions to movie genres
        4. 🤖 LangChain generates personalized descriptions
        5. ✨ Get movies perfectly matched to your current mood!
        """)
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### 📸 Real-Time Emotion Capture")
                webcam_input = gr.Image(
                    type="pil",
                    label="Webcam Input for Emotion Detection",
                    height=300,
                    sources=["webcam"]  # Only webcam for real-time capture
                )
                analyze_button = gr.Button(
                    "🔍 Analyze Emotion & Get Movie Recommendations",
                    variant="primary",
                    size="lg"
                )
                status_output = gr.Textbox(
                    label="📊 System Status",
                    value="Ready for real-time emotion analysis! Capture your image using webcam.",
                    interactive=False
                )
                emotion_display = gr.Textbox(
                    label="🎭 Current Emotion Status",
                    value="No emotion detected yet - capture an image to start!",
                    interactive=False
                )
            with gr.Column(scale=2):
                gr.Markdown("### 🎬 Your Personalized Movie Recommendations")
                emotion_analysis_output = gr.Markdown(
                    value="Capture an image to see detailed emotion analysis with confidence scores...",
                    label="Emotion Analysis Results"
                )
                movie_recommendations_output = gr.Markdown(
                    value="Your emotion-based movie recommendations will appear here after analysis...",
                    label="Personalized Movie Recommendations"
                )
        with gr.Row():
            with gr.Column():
                gr.Markdown("""
                ### 🔧 System Information & Tech Stack
                **Emotion Detection:** DeepFace (Facial Expression Analysis) + OpenCV (Real-time Processing)
                **Movie Data Source:** The Movie Database (TMDB) API
                **AI Enhancement:** LangChain + OpenAI (Emotion-specific descriptions)
                **Supported Emotions:** Happy, Sad, Angry, Fear, Surprise, Disgust, Neutral
                **Privacy:** All processing is real-time and images are not stored
                **Note:** Ensure good lighting for best emotion detection results
                """)
        # Event handlers
        analyze_button.click(
            fn=analyze_emotion_and_recommend,
            inputs=[webcam_input],
            outputs=[emotion_analysis_output, movie_recommendations_output, status_output]
        )
        webcam_input.change(
            fn=lambda img: "Image captured - ready for emotion analysis!" if img else "No image captured yet",
            inputs=[webcam_input],
            outputs=[emotion_display]
        )
        app.load(
            fn=lambda: "System ready! Capture your image to start real-time emotion-based movie recommendations 🎬😊",
            outputs=[emotion_display]
        )
    return app


# Launch the application
if __name__ == "__main__":
    try:
        app = create_gradio_interface()
        app.launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=True,
            show_error=True
        )
    except Exception as e:
        print(f"Failed to launch application: {e}")
        print("Please ensure all environment variables are set correctly:")
        print("- TMDB_API_KEY: Your TMDB API key")
        print("- OPENAI_API_KEY: Your OpenAI API key (optional, for AI enhancement)")