# Hugging Face Space: Gradio app for text sentiment + facial-emotion analysis.
# (This file was recovered from an error-page paste; the Space was failing
# with a runtime error — see fixes below.)
# Third-party dependencies for the Gradio demo app.
import gradio as gr
import cv2
from deepface import DeepFace
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import tempfile

# Single shared VADER analyzer; it is stateless, so one instance can be
# reused across requests.
analyzer = SentimentIntensityAnalyzer()
def analyze_text(text):
    """Classify *text* as Positive / Negative / Neutral using VADER.

    Uses the conventional VADER compound-score thresholds (>= 0.05
    positive, <= -0.05 negative, otherwise neutral).

    NOTE(review): the original returned labels with mojibake suffixes
    ("Positive π"); restored to plain ASCII labels.
    """
    score = analyzer.polarity_scores(text)
    if score['compound'] >= 0.05:
        return "Positive"
    elif score['compound'] <= -0.05:
        return "Negative"
    else:
        return "Neutral"
def process_all(text, video):
    """Combine text sentiment and facial emotion into one display string.

    Bug fix: the original called the undefined name ``analyze_sentiment``,
    raising NameError at request time; the sentiment helper in this file
    is ``analyze_text``.
    """
    text_sentiment = analyze_text(text)
    video_emotion = analyze_video_emotion(video)
    return f"Text Sentiment: {text_sentiment}\nFacial Emotion: {video_emotion}"
# Build and serve the combined text + video analyzer UI.
iface = gr.Interface(
    fn=process_all,
    inputs=[
        gr.Textbox(label="Social Media Post"),
        gr.Video(label="Upload Video"),
    ],
    outputs="text",
    title="Emotion & Sentiment Analyzer",
)
iface.launch()
import cv2
import tempfile
from deepface import DeepFace


def _to_video_path(video_file):
    """Return a filesystem path for *video_file*.

    Gradio components may hand the callback either a filepath string
    (gr.Video) or a file-like object (gr.File); file-like uploads are
    copied to a temporary .mp4 first so OpenCV can open them. The
    original code unconditionally called ``video_file.read()``, which
    crashes when a path string is passed.
    """
    if isinstance(video_file, str):
        return video_file
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
        tmp.write(video_file.read())
        return tmp.name


def analyze_video(video_file):
    """Detect the dominant facial emotion in the first readable frame.

    Returns a capitalized emotion name, or a human-readable error string
    when the video is missing/unreadable or analysis fails.
    """
    if video_file is None:
        return "No video uploaded"
    cap = cv2.VideoCapture(_to_video_path(video_file))
    success, frame = cap.read()
    cap.release()
    if not success:
        return "Could not read video"
    try:
        result = DeepFace.analyze(frame, actions=["emotion"], enforce_detection=False)
        return result[0]['dominant_emotion'].capitalize()
    except Exception as e:
        return f"Error: {str(e)}"


def analyze_video_emotion(video_file):
    """Return the most frequent dominant emotion over the first ~60 frames.

    Scans frames until the capture ends or 60 frames have been examined,
    collecting DeepFace's dominant emotion per frame; per-frame failures
    are logged and skipped (best-effort, not silently swallowed).
    """
    if video_file is None:
        return "No face detected"
    cap = cv2.VideoCapture(_to_video_path(video_file))
    emotions = []
    frame_count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret or frame_count > 60:  # limit work to the first 60 frames
            break
        try:
            result = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
            emotions.append(result[0]['dominant_emotion'])
        except Exception as e:
            # Best effort: keep scanning remaining frames.
            print("Error analyzing frame:", e)
        frame_count += 1
    cap.release()
    if emotions:
        # Most frequent emotion across the analyzed frames.
        return max(set(emotions), key=emotions.count)
    return "No face detected"
def analyze_post(text, video):
    """Analyze a post's text sentiment and video emotion; format for display.

    NOTE(review): the original f-string contained mojibake prefixes
    ("π Sentiment", "π₯ Emotion"); restored to plain labels.
    """
    sentiment = analyze_text(text)
    emotion = analyze_video(video)
    return f"Sentiment: {sentiment}\nEmotion: {emotion}"
| import gradio as gr | |
# Lazily-constructed transformers sentiment pipeline (built on first use).
_sentiment_classifier = None


def analyze_text(text):
    """Classify sentiment of *text* with a transformers pipeline.

    Returns the pipeline's label string (e.g. "POSITIVE"/"NEGATIVE").

    Perf fix: the original re-created the ``pipeline("sentiment-analysis")``
    (a model download + load) on EVERY call; the pipeline is now cached in
    a module-level variable. NOTE(review): this redefines the VADER-based
    ``analyze_text`` earlier in the file — the last definition wins.
    """
    global _sentiment_classifier
    if _sentiment_classifier is None:
        from transformers import pipeline  # deferred heavy import
        _sentiment_classifier = pipeline("sentiment-analysis")
    return _sentiment_classifier(text)[0]['label']
def process_all(text_input, video_input):
    """Run text sentiment and facial-emotion analysis; return a summary string."""
    text_result = analyze_text(text_input)
    video_result = analyze_video_emotion(video_input)
    return f"Text Sentiment: {text_result}\nFacial Emotion: {video_result}"
# Build and serve the decoder UI. NOTE(review): this file launches several
# Interfaces; only one Gradio app should launch per process — consolidate
# before deploying.
gr.Interface(
    fn=process_all,
    inputs=[
        gr.Textbox(label="Enter Social Media Text"),
        gr.Video(label="Upload a Video Clip"),
    ],
    outputs="text",
    title="Emotion & Sentiment Decoder",
    description="Analyzes social media text & facial expressions from video.",
).launch()
# Analyzer UI for analyze_post. Bug fixes: the original fused two statements
# on one line ("interface.launch()import gradio as gr") and the title
# carried a mojibake prefix ("π±").
interface = gr.Interface(
    fn=analyze_post,
    inputs=[
        gr.Textbox(label="Post Text", placeholder="Enter your message here"),
        gr.File(label="Upload video (.mp4)", file_types=[".mp4"]),
    ],
    outputs="text",
    title="Emotion & Sentiment Analyzer",
    description="Analyze text sentiment and facial emotion from video. No re-running needed. Permanent on Hugging Face.",
)
interface.launch()
import gradio as gr
# Duplicate of the top-of-file setup (this file was pasted twice);
# re-importing and re-creating the stateless analyzer is harmless.
import cv2
from deepface import DeepFace
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import tempfile

analyzer = SentimentIntensityAnalyzer()
def analyze_text(text):
    """Classify *text* as Positive / Negative / Neutral using VADER.

    Uses the conventional VADER compound-score thresholds (>= 0.05
    positive, <= -0.05 negative, otherwise neutral).

    NOTE(review): mojibake label suffixes removed (duplicate definition —
    this file was pasted twice).
    """
    score = analyzer.polarity_scores(text)
    if score['compound'] >= 0.05:
        return "Positive"
    elif score['compound'] <= -0.05:
        return "Negative"
    else:
        return "Neutral"
def process_all(text, video):
    """Combine text sentiment and facial emotion into one display string.

    Bug fix: the original called the undefined name ``analyze_sentiment``
    (NameError at request time); the helper is ``analyze_text``.
    """
    text_sentiment = analyze_text(text)
    video_emotion = analyze_video_emotion(video)
    return f"Text Sentiment: {text_sentiment}\nFacial Emotion: {video_emotion}"
# Combined text + video analyzer UI (duplicate paste of the block above).
iface = gr.Interface(
    fn=process_all,
    inputs=[
        gr.Textbox(label="Social Media Post"),
        gr.Video(label="Upload Video"),
    ],
    outputs="text",
    title="Emotion & Sentiment Analyzer",
)
iface.launch()
import cv2
import tempfile
from deepface import DeepFace


def _to_video_path(video_file):
    """Return a filesystem path for *video_file*.

    Gradio may pass a filepath string (gr.Video) or a file-like object
    (gr.File); file-like uploads are copied to a temporary .mp4 so OpenCV
    can open them. The original unconditionally called ``.read()``, which
    crashes on a path string.
    """
    if isinstance(video_file, str):
        return video_file
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
        tmp.write(video_file.read())
        return tmp.name


def analyze_video(video_file):
    """Detect the dominant facial emotion in the first readable frame.

    Returns a capitalized emotion name, or a human-readable error string.
    """
    if video_file is None:
        return "No video uploaded"
    cap = cv2.VideoCapture(_to_video_path(video_file))
    success, frame = cap.read()
    cap.release()
    if not success:
        return "Could not read video"
    try:
        result = DeepFace.analyze(frame, actions=["emotion"], enforce_detection=False)
        return result[0]['dominant_emotion'].capitalize()
    except Exception as e:
        return f"Error: {str(e)}"


def analyze_video_emotion(video_file):
    """Return the most frequent dominant emotion over the first ~60 frames.

    Per-frame DeepFace failures are logged and skipped so one bad frame
    does not abort the scan.
    """
    if video_file is None:
        return "No face detected"
    cap = cv2.VideoCapture(_to_video_path(video_file))
    emotions = []
    frame_count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret or frame_count > 60:  # limit work to the first 60 frames
            break
        try:
            result = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
            emotions.append(result[0]['dominant_emotion'])
        except Exception as e:
            # Best effort: keep scanning remaining frames.
            print("Error analyzing frame:", e)
        frame_count += 1
    cap.release()
    if emotions:
        # Most frequent emotion across the analyzed frames.
        return max(set(emotions), key=emotions.count)
    return "No face detected"
def analyze_post(text, video):
    """Analyze a post's text sentiment and video emotion; format for display.

    NOTE(review): mojibake prefixes removed from the output f-string.
    """
    sentiment = analyze_text(text)
    emotion = analyze_video(video)
    return f"Sentiment: {sentiment}\nEmotion: {emotion}"
| import gradio as gr | |
# Lazily-constructed transformers sentiment pipeline (built on first use).
_sentiment_classifier = None


def analyze_text(text):
    """Classify sentiment of *text* with a transformers pipeline.

    Perf fix: the original re-created the pipeline (model download + load)
    on EVERY call; it is now cached in a module-level variable.
    """
    global _sentiment_classifier
    if _sentiment_classifier is None:
        from transformers import pipeline  # deferred heavy import
        _sentiment_classifier = pipeline("sentiment-analysis")
    return _sentiment_classifier(text)[0]['label']
def process_all(text_input, video_input):
    """Run text sentiment and facial-emotion analysis; return a summary string."""
    text_result = analyze_text(text_input)
    video_result = analyze_video_emotion(video_input)
    return f"Text Sentiment: {text_result}\nFacial Emotion: {video_result}"
# Decoder UI (duplicate paste). Only one Gradio app should launch per
# process — consolidate before deploying.
gr.Interface(
    fn=process_all,
    inputs=[
        gr.Textbox(label="Enter Social Media Text"),
        gr.Video(label="Upload a Video Clip"),
    ],
    outputs="text",
    title="Emotion & Sentiment Decoder",
    description="Analyzes social media text & facial expressions from video.",
).launch()
# Analyzer UI for analyze_post. Bug fixes: the original had a stray,
# bodyless ``if text_input: / elif video_input: / else:`` fragment pasted
# INSIDE the gr.Interface(...) argument list (a syntax error) — that
# branching belongs in the callback (analyze_post), not here — and the
# title carried a mojibake prefix ("π±").
interface = gr.Interface(
    fn=analyze_post,
    inputs=[
        gr.Textbox(label="Post Text", placeholder="Enter your message here"),
        gr.File(label="Upload video (.mp4)", file_types=[".mp4"]),
    ],
    outputs="text",
    title="Emotion & Sentiment Analyzer",
    description="Analyze text sentiment and facial emotion from video. No re-running needed. Permanent on Hugging Face.",
)
interface.launch()