"""VigilAudio Streamlit dashboard.

Front-end for the VigilAudio content-safety API: lets the user upload or
live-record audio, POSTs it to the prediction endpoint, and renders the
dominant emotion plus a moderation flag computed from the per-segment
timeline returned by the backend.

NOTE(review): this file was recovered from a whitespace-mangled paste.
The inline HTML/CSS payloads of several ``st.markdown(...)`` calls were
lost (empty triple-quoted strings) and the final dominant-tone card was
truncated mid f-string; those spots are marked with TODOs below.
"""

import os

# NOTE(review): json/numpy/pandas/plotly were imported in the original
# paste; kept because the truncated chart section (res_col2) may use them.
import json  # noqa: F401

import numpy as np  # noqa: F401
import pandas as pd  # noqa: F401
import plotly.express as px  # noqa: F401
import requests
import streamlit as st
from audio_recorder_streamlit import audio_recorder

# --- CONFIG ---
# Environment variables allow deployment overrides; fall back to localhost.
API_URL = os.getenv("API_URL", "http://localhost:8000/predict")
HEALTH_URL = os.getenv("HEALTH_URL", "http://localhost:8000/health")

st.set_page_config(page_title="VigilAudio Dashboard", layout="wide")

# --- CUSTOM CSS ---
# TODO(review): the CSS payload was lost in the source paste; the call is
# kept so the slot is easy to re-fill.
st.markdown("""
""", unsafe_allow_html=True)

# --- UI HEADER ---
st.title("VigilAudio")
st.caption("ML-Powered Content Safety Engine (Microservices Architecture)")
st.markdown("---")

# --- SYSTEM STATUS SIDEBAR ---
with st.sidebar:
    st.header("System Health")
    try:
        health_resp = requests.get(HEALTH_URL, timeout=2)
        health = health_resp.json()
        st.success(f"Backend: {health['status'].upper()}")
        st.info(f"Engine: {health['engine']}")
        st.caption(f"Limit: {health['max_duration_limit']}s")
    except (requests.RequestException, ValueError, KeyError):
        # Narrowed from a bare ``except:``: a network failure, non-JSON
        # body, or missing key all mean the backend is unusable, but a
        # bare except would also have swallowed KeyboardInterrupt/bugs.
        st.error("Backend Server Offline")

# --- INPUT TABS ---
tab_upload, tab_mic = st.tabs(["Upload File", "Live Microphone"])

# Whichever tab produced audio last wins; None means "nothing to analyze".
audio_source_bytes = None
audio_source_name = "recording.wav"

with tab_upload:
    uploaded_file = st.file_uploader(
        "Upload Audio Content", type=["wav", "mp3", "m4a"], key="api_uploader"
    )
    if uploaded_file:
        audio_source_bytes = uploaded_file.getvalue()
        audio_source_name = uploaded_file.name

with tab_mic:
    st.write("Capture Live Audio")
    recorded_audio_bytes = audio_recorder(
        text="Start Recording", icon_size="2x", key="api_recorder"
    )
    if recorded_audio_bytes:
        audio_source_bytes = recorded_audio_bytes
        audio_source_name = "mic_recording.wav"

# --- MAIN CONTENT ---
if audio_source_bytes is not None:
    st.audio(audio_source_bytes)
    if st.button(
        "Analyze Content",
        type="primary",
        use_container_width=True,
        key="api_analyze_btn",
    ):
        with st.spinner("Communicating with VigilAudio API..."):
            files = {"file": (audio_source_name, audio_source_bytes)}
            try:
                # Timeout added: inference can be slow, but the UI must not
                # hang forever on a dead backend (original had no timeout).
                response = requests.post(API_URL, files=files, timeout=120)
                if response.status_code == 200:
                    data = response.json()

                    # Assumes the API schema seen in this file:
                    # dominant_emotion, timeline[{emotion, confidence}],
                    # is_truncated, original_duration — confirm server-side.
                    dominant = data['dominant_emotion']
                    timeline = data['timeline']

                    # Backend truncates long clips; surface that to the user.
                    if data.get('is_truncated'):
                        st.warning(f"Note: Audio was truncated to 60s for analysis (Original: {data['original_duration']}s)")

                    # Moderation logic: flag high-confidence negative emotions.
                    flagged_emotions = ['angry', 'fearful', 'disgusted']
                    is_flagged = any(
                        seg['emotion'] in flagged_emotions and seg['confidence'] > 0.85
                        for seg in timeline
                    )
                    if is_flagged:
                        # TODO(review): the alert-banner HTML was lost in the
                        # source paste; the call is kept as a placeholder.
                        st.markdown("""
""", unsafe_allow_html=True)

                    # Layout for charts.
                    res_col1, res_col2 = st.columns([1, 2])
                    color_map = {
                        "angry": "#f48771",
                        "happy": "#89d185",
                        "sad": "#4fc1ff",
                        "fearful": "#c586c0",
                        "disgusted": "#ce9178",
                        "neutral": "#808080",
                        # NOTE(review): "suprised" looks like a typo for
                        # "surprised", but the key must match the label the
                        # backend emits — confirm before renaming.
                        "suprised": "#dcdcaa",
                    }

                    with res_col1:
                        st.subheader("Dominant Tone")
                        # Unknown labels fall back to white.
                        color = color_map.get(dominant, "#ffffff")
                        # TODO(review): the original card HTML was truncated
                        # in the source paste; minimal reconstruction below.
                        st.markdown(
                            f"<h2 style='color: {color};'>{dominant.upper()}</h2>",
                            unsafe_allow_html=True,
                        )
                else:
                    st.error(f"API Error: status {response.status_code}")
            except requests.RequestException as exc:
                # NOTE(review): the original except clause was outside the
                # visible text; this handler is reconstructed so the try is
                # syntactically complete and network failures are reported.
                st.error(f"Connection Error: Could not reach the API. ({exc})")