raviix46 committed on
Commit
cde060c
Β·
verified Β·
1 Parent(s): 9e14cbf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -9
app.py CHANGED
@@ -2,10 +2,10 @@ import gradio as gr
2
  from transformers import pipeline
3
 
4
  # Load models
5
- text_emotion_model = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", return_all_scores=False)
6
- audio_emotion_model = pipeline("audio-classification", model="superb/wav2vec2-base-superb-er", return_all_scores=False)
7
 
8
- # Emoji map
9
  emoji_map = {
10
  "joy": "😊", "happy": "😊", "anger": "😠", "angry": "😠",
11
  "sadness": "😒", "sad": "😒", "fear": "😨", "calm": "😌",
@@ -13,10 +13,12 @@ emoji_map = {
13
  "optimism": "πŸ™‚"
14
  }
15
 
 
16
  def format_emotion(label, score):
17
  emoji = emoji_map.get(label.lower(), "❓")
18
  return f"{emoji} <b>{label.title()}</b> ({int(score * 100)}%)"
19
 
 
20
  def detect_emotion(mode, text, audio):
21
  if mode == "Text" and text:
22
  result = text_emotion_model(text)[0]
@@ -26,30 +28,33 @@ def detect_emotion(mode, text, audio):
26
  return f"🎧 <b>Audio Emotion:</b><br>{format_emotion(result['label'], result['score'])}"
27
  return "⚠️ <b>Please provide valid input.</b>"
28
 
29
- def clear_inputs():
 
30
  return "Text", "", None, ""
31
 
 
32
  with gr.Blocks() as demo:
33
  gr.Markdown("## 🎭 Emotion Detection App (Text or Audio)")
34
 
35
  mode = gr.Radio(["Text", "Audio"], label="Choose Input Mode", value="Text")
36
 
37
- text_input = gr.Textbox(label="πŸ’¬ Enter your text", visible=True).style(container=True)
38
- audio_input = gr.Audio(type="filepath", label="🎀 Record or Upload Audio", visible=False).style(container=True)
39
 
40
- output = gr.HTML(label="🎯 Output", value="", show_label=True)
41
 
42
  detect_btn = gr.Button("πŸ” Detect Emotion")
43
  clear_btn = gr.Button("❌ Clear Inputs")
44
 
 
45
  def toggle_inputs(mode):
46
  return (
47
  gr.update(visible=(mode == "Text")),
48
  gr.update(visible=(mode == "Audio"))
49
  )
50
-
51
  mode.change(fn=toggle_inputs, inputs=mode, outputs=[text_input, audio_input])
 
52
  detect_btn.click(fn=detect_emotion, inputs=[mode, text_input, audio_input], outputs=output)
53
- clear_btn.click(fn=clear_inputs, inputs=[], outputs=[mode, text_input, audio_input, output])
54
 
55
  demo.launch()
 
2
  from transformers import pipeline
3
 
4
  # Load models
5
+ text_emotion_model = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base")
6
+ audio_emotion_model = pipeline("audio-classification", model="superb/wav2vec2-base-superb-er")
7
 
8
+ # Emoji mapping
9
  emoji_map = {
10
  "joy": "😊", "happy": "😊", "anger": "😠", "angry": "😠",
11
  "sadness": "😒", "sad": "😒", "fear": "😨", "calm": "😌",
 
13
  "optimism": "πŸ™‚"
14
  }
15
 
16
+ # Format output
17
# Format output
def format_emotion(label, score):
    """Render an emotion label plus confidence as a small HTML snippet.

    The label is mapped to an emoji via the module-level ``emoji_map``
    (falling back to a question mark for unknown labels), and the score
    is shown as a truncated whole-number percentage.
    """
    icon = emoji_map.get(label.lower(), "❓")
    percent = int(score * 100)
    return f"{icon} <b>{label.title()}</b> ({percent}%)"
20
 
21
+ # Main prediction function
22
  def detect_emotion(mode, text, audio):
23
  if mode == "Text" and text:
24
  result = text_emotion_model(text)[0]
 
28
  return f"🎧 <b>Audio Emotion:</b><br>{format_emotion(result['label'], result['score'])}"
29
  return "⚠️ <b>Please provide valid input.</b>"
30
 
31
+ # Clear inputs
32
# Clear inputs
def clear_all():
    """Reset the UI to its initial state.

    Returns the tuple (mode, text, audio, output): mode back to "Text",
    empty text box, no audio file, and an empty output panel.
    """
    default_mode = "Text"
    empty_text = ""
    return default_mode, empty_text, None, ""
34
 
35
+ # Build UI
36
  with gr.Blocks() as demo:
37
  gr.Markdown("## 🎭 Emotion Detection App (Text or Audio)")
38
 
39
  mode = gr.Radio(["Text", "Audio"], label="Choose Input Mode", value="Text")
40
 
41
+ text_input = gr.Textbox(label="πŸ’¬ Enter your text", visible=True)
42
+ audio_input = gr.Audio(type="filepath", label="🎀 Record or Upload Audio", visible=False)
43
 
44
+ output = gr.HTML(label="🎯 Output", value="")
45
 
46
  detect_btn = gr.Button("πŸ” Detect Emotion")
47
  clear_btn = gr.Button("❌ Clear Inputs")
48
 
49
+ # Show/hide inputs based on mode
50
  def toggle_inputs(mode):
51
  return (
52
  gr.update(visible=(mode == "Text")),
53
  gr.update(visible=(mode == "Audio"))
54
  )
 
55
  mode.change(fn=toggle_inputs, inputs=mode, outputs=[text_input, audio_input])
56
+
57
  detect_btn.click(fn=detect_emotion, inputs=[mode, text_input, audio_input], outputs=output)
58
+ clear_btn.click(fn=clear_all, inputs=[], outputs=[mode, text_input, audio_input, output])
59
 
60
  demo.launch()