Update app.py
Browse files
app.py
CHANGED
|
@@ -8,6 +8,7 @@ import os
|
|
| 8 |
from tensorflow.keras.models import load_model
|
| 9 |
from faster_whisper import WhisperModel
|
| 10 |
import random
|
|
|
|
| 11 |
|
| 12 |
# Load the emotion prediction model
|
| 13 |
def load_emotion_model(model_path):
|
|
@@ -66,6 +67,27 @@ def predict_emotion_from_audio(wav_filepath):
|
|
| 66 |
print("Error predicting emotion:", e)
|
| 67 |
return "Prediction error"
|
| 68 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
api_key = os.getenv("DeepAI_api_key")
|
| 70 |
|
| 71 |
# Function to generate an image using DeepAI Text to Image API
|
|
@@ -118,10 +140,13 @@ def get_predictions(audio_input):
|
|
| 118 |
# Handle case where emotion_prediction might be None
|
| 119 |
if emotion_prediction is None:
|
| 120 |
emotion_prediction = "Unknown"
|
| 121 |
-
|
|
|
|
|
|
|
|
|
|
| 122 |
image = generate_image(emotion_prediction, transcribed_text)
|
| 123 |
|
| 124 |
-
return emotion_prediction, transcribed_text, image
|
| 125 |
|
| 126 |
# Create the Gradio interface
|
| 127 |
interface = gr.Interface(
|
|
@@ -130,10 +155,11 @@ interface = gr.Interface(
|
|
| 130 |
outputs=[
|
| 131 |
gr.Label(label="Acoustic Prediction"),
|
| 132 |
gr.Label(label="Transcribed Text"),
|
|
|
|
| 133 |
gr.Image(type='pil', label="Generated Image")
|
| 134 |
],
|
| 135 |
title="Affective Virtual Environments",
|
| 136 |
-
description="Create an AVE using your voice."
|
| 137 |
)
|
| 138 |
|
| 139 |
interface.launch()
|
|
|
|
| 8 |
from tensorflow.keras.models import load_model
|
| 9 |
from faster_whisper import WhisperModel
|
| 10 |
import random
|
| 11 |
+
from textblob import TextBlob # Added for sentiment analysis
|
| 12 |
|
| 13 |
# Load the emotion prediction model
|
| 14 |
def load_emotion_model(model_path):
|
|
|
|
| 67 |
print("Error predicting emotion:", e)
|
| 68 |
return "Prediction error"
|
| 69 |
|
| 70 |
+
# Function to analyze sentiment from text
def analyze_sentiment(text):
    """Classify the sentiment of ``text`` using TextBlob polarity.

    Parameters:
        text: Transcribed speech to analyze; may be None or empty when
            transcription produced nothing.

    Returns:
        tuple[str, float]: ``(sentiment, polarity)`` where sentiment is
        "positive" (polarity > 0.1), "negative" (polarity < -0.1), or
        "neutral" otherwise, and polarity is TextBlob's score in [-1, 1].
        Empty/missing input yields ``("No text to analyze", 0.0)``; any
        failure yields ``("sentiment analysis error", 0.0)`` so the
        caller's pipeline keeps running instead of crashing.
    """
    try:
        # Guard against None and whitespace-only transcriptions.
        if not text or not text.strip():
            return "No text to analyze", 0.0

        polarity = TextBlob(text).sentiment.polarity

        # Small dead zone around 0 so near-zero polarity reads as neutral
        # rather than flip-flopping between positive/negative.
        if polarity > 0.1:
            sentiment = "positive"
        elif polarity < -0.1:
            sentiment = "negative"
        else:
            sentiment = "neutral"

        return sentiment, polarity
    except Exception as e:
        # Broad catch is deliberate and matches the file's error-handling
        # convention (cf. "Prediction error"): sentiment is a best-effort
        # extra and must never break the main prediction flow.
        print("Error analyzing sentiment:", e)
        return "sentiment analysis error", 0.0
|
| 90 |
+
|
| 91 |
api_key = os.getenv("DeepAI_api_key")
|
| 92 |
|
| 93 |
# Function to generate an image using DeepAI Text to Image API
|
|
|
|
| 140 |
# Handle case where emotion_prediction might be None
|
| 141 |
if emotion_prediction is None:
|
| 142 |
emotion_prediction = "Unknown"
|
| 143 |
+
|
| 144 |
+
# Analyze sentiment of transcribed text
|
| 145 |
+
sentiment, polarity = analyze_sentiment(transcribed_text)
|
| 146 |
+
|
| 147 |
image = generate_image(emotion_prediction, transcribed_text)
|
| 148 |
|
| 149 |
+
return emotion_prediction, transcribed_text, f"Sentiment: {sentiment} (Polarity: {polarity:.2f})", image
|
| 150 |
|
| 151 |
# Create the Gradio interface
|
| 152 |
interface = gr.Interface(
|
|
|
|
| 155 |
outputs=[
|
| 156 |
gr.Label(label="Acoustic Prediction"),
|
| 157 |
gr.Label(label="Transcribed Text"),
|
| 158 |
+
gr.Label(label="Sentiment Analysis"), # Added sentiment analysis output
|
| 159 |
gr.Image(type='pil', label="Generated Image")
|
| 160 |
],
|
| 161 |
title="Affective Virtual Environments",
|
| 162 |
+
description="Create an AVE using your voice. Get emotion prediction, transcription, sentiment analysis, and a generated image."
|
| 163 |
)
|
| 164 |
|
| 165 |
interface.launch()
|