jfforero committed on
Commit
cee185a
·
verified ·
1 Parent(s): 445b628

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -6
app.py CHANGED
@@ -110,14 +110,41 @@ def analyze_sentiment(text):
110
  print("Error analyzing sentiment:", e)
111
  return "neutral", 0.0
112
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
113
  # Function to generate music with MusicGen (using acoustic emotion prediction)
114
  def generate_music(transcribed_text, emotion_prediction):
115
  try:
116
  if processor is None or music_model is None:
117
  return None
118
 
119
- # Create a prompt that combines the acoustic emotion and transcription
120
- prompt = f"Background music that is {emotion_prediction} and represents: {transcribed_text}"
121
 
122
  # Limit prompt length to avoid model issues
123
  if len(prompt) > 200:
@@ -157,8 +184,8 @@ def generate_image(sentiment_prediction, transcribed_text):
157
  # fallback white image if no API key
158
  return Image.new('RGB', (1024, 512), color='white')
159
 
160
- # Create the prompt for text2img using SENTIMENT analysis instead of acoustic emotion
161
- prompt = f"Generate an equirectangular 360 image texture with {sentiment_prediction} sentiment, representing the idea of: [{transcribed_text}]."
162
 
163
  # Make request to DeepAI text2img API
164
  response = requests.post(
@@ -257,10 +284,10 @@ def get_predictions(audio_input):
257
  # Analyze sentiment of transcribed text (for image)
258
  sentiment, polarity = analyze_sentiment(transcribed_text)
259
 
260
- # Generate image using SENTIMENT analysis
261
  image = generate_image(sentiment, transcribed_text)
262
 
263
- # Generate music using ACOUSTIC EMOTION prediction
264
  music_path = generate_music(transcribed_text, emotion_prediction)
265
 
266
  # Create visualization with both texture and sphere
 
110
  print("Error analyzing sentiment:", e)
111
  return "neutral", 0.0
112
 
113
+ # Function to get image prompt based on sentiment
114
+ def get_image_prompt(sentiment, transcribed_text):
115
+ if sentiment == "positive":
116
+ return f"Generate a vibrant, uplifting equirectangular 360 image texture with bright colors, joyful atmosphere, and optimistic vibes representing: [{transcribed_text}]. The scene should evoke happiness and positivity."
117
+
118
+ elif sentiment == "negative":
119
+ return f"Generate a moody, dramatic equirectangular 360 image texture with dark tones, intense atmosphere, and emotional depth representing: [{transcribed_text}]. The scene should convey melancholy and intensity."
120
+
121
+ else: # neutral
122
+ return f"Generate a balanced, serene equirectangular 360 image texture with harmonious colors, peaceful atmosphere, and calm vibes representing: [{transcribed_text}]. The scene should evoke tranquility and balance."
123
+
124
# Function to get music prompt based on emotion
def get_music_prompt(emotion, transcribed_text):
    """Build a MusicGen text prompt tailored to an acoustic emotion label.

    Args:
        emotion: Emotion label (case-insensitive), e.g. "happy", "sad".
            A recognized label selects its dedicated prompt; an unrecognized
            string falls back to a generic prompt that interpolates the label.
            Non-string values (e.g. None from a failed prediction) fall back
            to the neutral prompt instead of raising AttributeError.
        transcribed_text: Transcription to embed in the prompt.

    Returns:
        A single-string music-generation prompt.
    """
    emotion_prompts = {
        'neutral': f"Create ambient, background music with neutral tones, subtle melodies, and unobtrusive atmosphere that complements: {transcribed_text}. The music should be calm and balanced.",
        'calm': f"Generate soothing, peaceful music with gentle melodies, soft instrumentation, and relaxing vibes that represents: {transcribed_text}. The music should evoke tranquility and serenity.",
        'happy': f"Create joyful, upbeat music with cheerful melodies, bright instrumentation, and energetic rhythms that celebrates: {transcribed_text}. The music should evoke happiness and positivity.",
        'sad': f"Generate emotional, melancholic music with poignant melodies, soft strings, and heartfelt atmosphere that reflects: {transcribed_text}. The music should evoke sadness and reflection.",
        'angry': f"Create intense, powerful music with driving rhythms, aggressive instrumentation, and strong dynamics that expresses: {transcribed_text}. The music should evoke anger and intensity.",
        'fearful': f"Generate suspenseful, tense music with eerie melodies, atmospheric sounds, and unsettling vibes that represents: {transcribed_text}. The music should evoke fear and anticipation.",
        'disgust': f"Create dark, unsettling music with dissonant harmonies, unusual sounds, and uncomfortable atmosphere that reflects: {transcribed_text}. The music should evoke discomfort and unease.",
        'surprised': f"Generate dynamic, unexpected music with sudden changes, playful melodies, and surprising elements that represents: {transcribed_text}. The music should evoke surprise and wonder."
    }

    # Bug fix: the original called emotion.lower() unconditionally, which
    # raises AttributeError for None (or any non-string) before the .get
    # fallback could apply. Normalize defensively instead.
    emotion_key = emotion.lower() if isinstance(emotion, str) else 'neutral'

    return emotion_prompts.get(emotion_key,
                               f"Create background music with {emotion} atmosphere that represents: {transcribed_text}")
139
+
140
  # Function to generate music with MusicGen (using acoustic emotion prediction)
141
  def generate_music(transcribed_text, emotion_prediction):
142
  try:
143
  if processor is None or music_model is None:
144
  return None
145
 
146
+ # Get specific prompt based on emotion
147
+ prompt = get_music_prompt(emotion_prediction, transcribed_text)
148
 
149
  # Limit prompt length to avoid model issues
150
  if len(prompt) > 200:
 
184
  # fallback white image if no API key
185
  return Image.new('RGB', (1024, 512), color='white')
186
 
187
+ # Get specific prompt based on sentiment
188
+ prompt = get_image_prompt(sentiment_prediction, transcribed_text)
189
 
190
  # Make request to DeepAI text2img API
191
  response = requests.post(
 
284
  # Analyze sentiment of transcribed text (for image)
285
  sentiment, polarity = analyze_sentiment(transcribed_text)
286
 
287
+ # Generate image using SENTIMENT analysis with specific prompt
288
  image = generate_image(sentiment, transcribed_text)
289
 
290
+ # Generate music using ACOUSTIC EMOTION prediction with specific prompt
291
  music_path = generate_music(transcribed_text, emotion_prediction)
292
 
293
  # Create visualization with both texture and sphere