"""Streamlit dashboard for audio sentiment analysis.

Two tabs: (1) analyze an uploaded or bundled example audio file,
(2) analyze a microphone recording.  Both upload the audio to a Flask
backend (``POST /upload``), poll ``GET /status/<job_id>`` until the job
completes, then render an emotion timeline bar chart, a distribution pie
chart, and a detailed table.
"""

import os
import time

import pandas as pd
import plotly.express as px  # kept: imported by the original file
import plotly.graph_objects as go
import requests
import streamlit as st
from datetime import datetime  # kept: imported by the original file

# Page config
st.set_page_config(
    page_title="Audio Sentiment Analysis",
    page_icon="🎤",
    layout="wide"
)

# Title
st.title("🎤 Audio Sentiment Analysis Dashboard")
st.markdown("Analyze emotions from audio files with timeline visualization")

# Flask API URL (overridable for deployments where the backend is remote)
FLASK_URL = os.getenv("FLASK_URL", "http://localhost:5000")

# Emotion → emoji / color lookups, shared by both tabs.
# Unknown labels fall back to '❓' / grey via .get() so a new backend
# emotion never raises KeyError in the UI.
EMOTION_EMOJI = {
    'Happy': '😊', 'Sad': '😢', 'Angry': '😡', 'Neutral': '😐',
    'Fear': '😨', 'Surprise': '😲', 'Disgust': '🤢', 'Calm': '😌'
}
EMOTION_COLORS = {
    'Happy': '#FFD700', 'Sad': '#4169E1', 'Angry': '#DC143C',
    'Neutral': '#808080', 'Fear': '#9370DB', 'Surprise': '#FF8C00',
    'Disgust': '#32CD32', 'Calm': '#87CEEB'
}

# Polling policy: 60 attempts x 5 s sleep ≈ 5 minutes worst case
# (the original comment said "2 minutes" but slept 5 s per attempt).
MAX_POLL_ATTEMPTS = 60
POLL_INTERVAL_SECONDS = 5


def _analyze_via_api(files, spinner_message):
    """Upload *files* to the Flask backend and poll the job to completion.

    Args:
        files: ``requests``-style multipart dict, e.g. ``{'file': (name, fh, mime)}``.
        spinner_message: text shown while the upload request is in flight.

    Returns:
        ``(results, job_id)`` — *results* is the backend's results dict on
        success, or ``None`` on failure/timeout (an error is already shown);
        *job_id* is ``None`` only when the upload itself was rejected.

    Raises:
        requests.exceptions.ConnectionError: if the backend is unreachable
        (callers surface this as a friendly message).
    """
    with st.spinner(spinner_message):
        upload_response = requests.post(f"{FLASK_URL}/upload", files=files)

    # Backend answers 202 Accepted with a job id for async processing.
    if upload_response.status_code != 202:
        st.error(f"❌ Upload failed: {upload_response.json().get('error', 'Unknown error')}")
        return None, None

    job_id = upload_response.json()['job_id']

    progress_bar = st.progress(0)
    status_text = st.empty()

    for _ in range(MAX_POLL_ATTEMPTS):
        status_response = requests.get(f"{FLASK_URL}/status/{job_id}")
        if status_response.status_code == 200:
            payload = status_response.json()
            progress = payload['progress']
            progress_bar.progress(progress / 100)
            status_text.text(f"⚙️ {payload['message']} ({progress}%)")

            if payload['status'] == "completed":
                progress_bar.progress(100)
                status_text.empty()
                st.success("✅ Analysis Complete!")
                return payload['results'], job_id
            if payload['status'] == "failed":
                st.error(f"❌ Processing failed: {payload.get('error', 'Unknown error')}")
                progress_bar.empty()
                status_text.empty()
                return None, job_id

        time.sleep(POLL_INTERVAL_SECONDS)

    st.error("⏱️ Processing timeout. Please try again.")
    return None, job_id


def _render_results(results, duration_label, duration_help):
    """Render metrics, timeline bar chart, pie chart, and table for *results*.

    Args:
        results: backend results dict with keys ``timeline`` (list of
            ``{time, emotion, confidence}`` rows), ``duration``,
            ``emotions_detected`` and ``dominant_emotion``.
        duration_label / duration_help: per-tab wording for the first metric.
    """
    st.markdown("---")
    st.subheader("📊 Emotion Analysis Results")

    # Normalize the timeline into display-friendly column names.
    timeline = pd.DataFrame(results['timeline']).rename(
        columns={'time': 'Time (s)', 'emotion': 'Emotion', 'confidence': 'Confidence'}
    )
    timeline['Emoji'] = timeline['Emotion'].map(EMOTION_EMOJI)

    dominant = results['dominant_emotion']
    col1, col2, col3 = st.columns(3)
    with col1:
        st.metric(duration_label, results['duration'], help=duration_help)
    with col2:
        st.metric("Emotions Detected", results['emotions_detected'],
                  help="Number of unique emotions")
    with col3:
        st.metric("Dominant Emotion",
                  f"{EMOTION_EMOJI.get(dominant, '❓')} {dominant}",
                  help="Most frequent emotion")

    st.markdown("---")

    # Layout: timeline (wide) and pie chart (narrow)
    col1, col2 = st.columns([2, 1])

    with col1:
        st.subheader("⏱️ Emotion Timeline")
        # One bar per segment, colored by emotion, emoji label above each bar.
        fig_timeline = go.Figure()
        fig_timeline.add_trace(go.Bar(
            x=timeline['Time (s)'],
            y=timeline['Confidence'],
            marker_color=[EMOTION_COLORS.get(e, '#808080') for e in timeline['Emotion']],
            text=[EMOTION_EMOJI.get(e, '❓') for e in timeline['Emotion']],
            textposition='outside',
            textfont=dict(size=20),
            # Plotly hovertemplates use <br> for line breaks; <extra></extra>
            # suppresses the secondary trace-name box.
            hovertemplate='%{x}<br>Confidence: %{y:.2%}<extra></extra>',
            showlegend=False
        ))
        fig_timeline.update_layout(
            xaxis_title="Time",
            yaxis_title="Confidence",
            yaxis_range=[0, 1.1],  # headroom so 'outside' emoji labels fit
            height=400,
            hovermode='x'
        )
        st.plotly_chart(fig_timeline, width="stretch")

    with col2:
        st.subheader("📊 Distribution")
        emotion_counts = timeline['Emotion'].value_counts()
        fig_pie = go.Figure(data=[go.Pie(
            labels=[f"{EMOTION_EMOJI.get(e, '❓')} {e}" for e in emotion_counts.index],
            values=emotion_counts.values,
            marker=dict(colors=[EMOTION_COLORS.get(e, '#808080') for e in emotion_counts.index]),
            textinfo='percent+label',
            textfont=dict(size=12),
            hole=0.3
        )])
        fig_pie.update_layout(height=400, showlegend=False)
        st.plotly_chart(fig_pie, width="stretch")

    # Detailed Timeline Table
    st.subheader("📋 Detailed Timeline")
    display_df = timeline[['Time (s)', 'Emoji', 'Emotion', 'Confidence']].copy()
    display_df['Confidence'] = display_df['Confidence'].apply(lambda c: f"{c:.2%}")
    st.dataframe(display_df, width="stretch", hide_index=True)


# Create tabs
tab1, tab2 = st.tabs(["📁 File Analysis", "🎙️ Audio Recording"])

# ============================================
# TAB 1: File Analysis
# ============================================
with tab1:
    st.header("📁 File Analysis")
    st.markdown("Upload a pre-recorded audio file for sentiment analysis")

    # File selection option
    file_option = st.radio(
        "Choose audio source:",
        options=["📁 Upload Your File", "🎯 Try Example File"],
        horizontal=True,
        help="Select whether to upload your own file or use the example"
    )

    audio_file = None
    file_name = None

    if file_option == "📁 Upload Your File":
        uploaded_file = st.file_uploader(
            "Choose an audio file",
            type=["wav", "mp3", "ogg", "flac", "m4a"],
            help="Supported formats: WAV, MP3, OGG, FLAC, M4A"
        )
        if uploaded_file is not None:
            audio_file = uploaded_file
            file_name = uploaded_file.name
            st.success(f"✅ File uploaded: {uploaded_file.name}")
    else:
        # Example file: read fully into memory so no file handle is leaked
        # across Streamlit reruns (the original left open() unclosed).
        example_path = "input/test.wav"
        if os.path.exists(example_path):
            with open(example_path, 'rb') as example_fh:
                import io
                audio_file = io.BytesIO(example_fh.read())
            file_name = "test.wav"
            st.info("📌 Using example audio file: test.wav")
        else:
            st.warning("⚠️ Example file not found in input/ folder")

    # Show analyze button (disabled until a file is available)
    analyze_btn = st.button("🔍 Analyze Audio", type="primary", width="stretch",
                            disabled=(audio_file is None))

    # Initialize session state for results
    if 'analysis_results' not in st.session_state:
        st.session_state.analysis_results = None
    if 'job_id' not in st.session_state:
        st.session_state.job_id = None

    # Display audio player and file info if file is selected
    if audio_file is not None:
        st.subheader("🎵 Audio Preview")
        st.audio(audio_file)

        with st.expander("📊 File Information"):
            col1, col2, col3 = st.columns(3)
            with col1:
                st.metric("File Name", file_name)
            with col2:
                # Only Streamlit UploadedFile exposes .size; the in-memory
                # example buffer does not.
                if hasattr(audio_file, 'size'):
                    st.metric("File Size", f"{audio_file.size / 1024:.2f} KB")
                else:
                    st.metric("File Size", "N/A")
            with col3:
                if hasattr(audio_file, 'type'):
                    st.metric("File Type", audio_file.type)
                else:
                    st.metric("File Type", "WAV")

    # Analysis Results Section
    if analyze_btn and audio_file:
        try:
            # Rewind before upload: st.audio() above may have consumed the
            # stream, which would otherwise POST a truncated payload.
            if hasattr(audio_file, 'seek'):
                audio_file.seek(0)
            files = {'file': (file_name, audio_file, 'audio/wav')}

            results, job_id = _analyze_via_api(files, "📤 Uploading audio file...")
            if job_id is not None:
                st.session_state.job_id = job_id
            if results is not None:
                st.session_state.analysis_results = results
        except requests.exceptions.ConnectionError:
            st.error("❌ Could not connect to Flask server. Make sure it's running on port 5000!")
        except Exception as e:
            st.error(f"❌ An error occurred: {str(e)}")

    # Display results if available
    if st.session_state.analysis_results:
        _render_results(st.session_state.analysis_results,
                        duration_label="Total Duration",
                        duration_help="Audio length")

# ============================================
# TAB 2: Audio Input Analysis (Live Recording)
# ============================================
with tab2:
    st.header("🎙️ Audio Recording Analysis")
    st.markdown("Record audio from your microphone for real-time sentiment analysis")

    # Initialize session state for Tab 2
    if 'tab2_results' not in st.session_state:
        st.session_state.tab2_results = None

    # Audio recorder widget
    audio_data = st.audio_input("Record your audio")
    audio_filename = "recorded_audio.wav"

    if audio_data:
        st.success("✅ Recording complete! You can now analyze it.")
        st.subheader("🎵 Audio Preview")
        st.audio(audio_data)

    # Analyze button (needs a key: same label as the Tab 1 button)
    analyze_btn_tab2 = st.button(
        "🔍 Analyze Audio",
        type="primary",
        width="stretch",
        disabled=(audio_data is None),
        key="analyze_tab2"
    )

    # Analysis process
    if analyze_btn_tab2 and audio_data:
        try:
            # Rewind so the full recording is uploaded.
            if hasattr(audio_data, 'seek'):
                audio_data.seek(0)
            files = {'file': (audio_filename, audio_data, 'audio/wav')}

            results, _ = _analyze_via_api(files, "📤 Uploading audio...")
            if results is not None:
                st.session_state.tab2_results = results
        except requests.exceptions.ConnectionError:
            st.error("❌ Could not connect to Flask server. Make sure it's running on port 5000!")
        except Exception as e:
            st.error(f"❌ An error occurred: {str(e)}")

    # Display results if available
    if st.session_state.tab2_results:
        _render_results(st.session_state.tab2_results,
                        duration_label="Audio Duration",
                        duration_help="Length of audio")

# Footer
st.markdown("---")
st.caption("🔧 Powered by Flask + Streamlit | Audio Sentiment Analysis")