import whisper # Import whisper from OpenAI
import streamlit as st
import librosa
import numpy as np
import matplotlib.pyplot as plt
from pydub import AudioSegment
from transformers import pipeline
import os
# Load a pre-trained sentiment-analysis pipeline (the default checkpoint is a
# DistilBERT model fine-tuned on SST-2, which labels text POSITIVE/NEGATIVE)
sentiment_analyzer = pipeline("sentiment-analysis")
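
# Optional refactor sketch: cache the Whisper model so it is loaded once per
# Streamlit session rather than once per uploaded file. Assumes a Streamlit
# version that provides st.cache_resource (>= 1.18); load_whisper_model is a
# helper name introduced here, and "base" can be swapped for another size.
@st.cache_resource
def load_whisper_model(size="base"):
    """Return a cached Whisper model of the given size."""
    return whisper.load_model(size)
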
# Streamlit UI
st.title("🎤 Audio Sentiment Analysis")
st.write("Upload multiple MP3 files to analyze sentiment and tone.")
# Upload multiple audio files
uploaded_files = st.file_uploader("Choose MP3 files", type=["mp3"], accept_multiple_files=True)
# Process one audio file: convert MP3 to WAV, extract MFCC features,
# transcribe with Whisper, and run sentiment analysis on the transcription
def analyze_audio(file_path):
    # Convert MP3 to WAV (pydub relies on ffmpeg being installed)
    audio = AudioSegment.from_mp3(file_path)
    wav_path = os.path.splitext(file_path)[0] + ".wav"
    audio.export(wav_path, format="wav")

    # Load audio at its native sampling rate
    y, sr = librosa.load(wav_path, sr=None)

    # Extract MFCCs (Mel-frequency cepstral coefficients) and take the mean
    # across the time axis to get one value per coefficient
    mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
    mfccs_mean = np.mean(mfccs, axis=1)

    # Speech-to-text transcription using the cached Whisper model
    model = load_whisper_model()
    result = model.transcribe(wav_path)
    transcription = result["text"]

    # Sentiment analysis on the transcription; an empty transcription falls
    # back to a neutral 0.5 score. truncation=True guards against transcripts
    # longer than the classifier's input limit.
    if transcription.strip():
        sentiment_result = sentiment_analyzer(transcription, truncation=True)
    else:
        sentiment_result = [{"label": "NEGATIVE", "score": 0.5}]

    # Remove the intermediate WAV file after processing
    os.remove(wav_path)

    return sentiment_result[0], mfccs_mean, mfccs, y, sr, transcription
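
# The sentiment dict returned above follows the Hugging Face
# text-classification output shape, e.g. (score value illustrative only):
#   {"label": "POSITIVE", "score": 0.98}
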
# Process and plot if files are uploaded
if uploaded_files:
    # Create a directory to store temporary files
    os.makedirs("temp", exist_ok=True)

    # Prepare a single plot shared by all uploaded files
    fig, ax = plt.subplots(figsize=(10, 6))

    for uploaded_file in uploaded_files:
        # Save the uploaded file to disk
        file_path = os.path.join("temp", uploaded_file.name)
        with open(file_path, "wb") as f:
            f.write(uploaded_file.getbuffer())
        # Analyze sentiment, extract features, and get the transcription
        sentiment, mfccs_mean, mfccs, audio_data, sample_rate, transcription = analyze_audio(file_path)

        # Display the transcription and sentiment result
        st.subheader(f"📊 Sentiment Analysis Result for {uploaded_file.name}")
        st.write(f"**Transcription:** {transcription}")
        st.write(f"**Sentiment:** {sentiment['label']}")
        st.write(f"**Confidence:** {sentiment['score']:.2f}")

        # Map the sentiment onto a single 0-1 score: POSITIVE keeps its
        # confidence, NEGATIVE is flipped so lower values mean a more
        # negative tone
        sentiment_score = sentiment['score'] if sentiment['label'] == 'POSITIVE' else 1 - sentiment['score']
        # Agent's tone: a horizontal line at the sentiment score across the
        # duration of the clip (two points suffice for a constant line)
        duration = len(audio_data) / sample_rate
        ax.plot([0, duration], [sentiment_score, sentiment_score], label="Agent's Tone", linestyle='-', color='b')

        # Patient's tone: mfccs_mean already holds the time-averaged value of
        # each MFCC coefficient, so plot it against the coefficient index
        ax.plot(np.arange(len(mfccs_mean)), mfccs_mean, label="Patient's Tone Curve (MFCCs)", linestyle='--', color='r')
    # Customize plot
    ax.set_xlabel("Time (seconds) / MFCC Coefficient Index")
    ax.set_ylabel("Score / Mean MFCC Value")
    ax.set_title("Agent Tone & Patient Tone Curve")
    ax.legend()

    # Show plot in Streamlit
    st.pyplot(fig)

    # Clean up every temporary file, not just the most recently processed one
    for name in os.listdir("temp"):
        os.remove(os.path.join("temp", name))
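else:
    # Optional sketch: prompt the user when nothing has been uploaded yet
    st.info("Upload one or more MP3 files to get started.")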