|
|
import gradio as gr |
|
|
from transformers import pipeline |
|
|
|
|
|
|
|
|
# Pre-trained Hugging Face pipelines, instantiated once at import time.
# NOTE(review): both constructors download model weights on first run —
# requires network access and can take a while; consider lazy loading.

# Text classifier — presumably emits the model's emotion labels
# (joy, anger, sadness, fear, surprise, disgust, neutral); verify against model card.
text_emotion_model = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base")

# Audio classifier for the SUPERB emotion-recognition task; takes a filepath input.
audio_emotion_model = pipeline("audio-classification", model="superb/wav2vec2-base-superb-er")
|
|
|
|
|
|
|
|
# Maps lowercase emotion labels (from either model) to a display emoji.
# FIX: the original literals were mojibake (e.g. "π", "π ") from a
# mis-decoded UTF-8 file; restored to the intended emoji per label.
emoji_map = {
    "joy": "😀", "happy": "😀", "anger": "😠", "angry": "😠",
    "sadness": "😢", "sad": "😢", "fear": "😨", "calm": "😌",
    "surprise": "😲", "disgust": "🤢", "neutral": "😐", "boredom": "🥱",
    "optimism": "😊",
}
|
|
|
|
|
|
|
|
def format_emotion(label, score):
    """Return an HTML snippet like '😀 <b>Joy</b> (88%)' for one prediction.

    Args:
        label: Emotion label string as returned by a pipeline (any case).
        score: Confidence in [0, 1].

    Returns:
        HTML string: emoji, bold title-cased label, and integer percentage.
    """
    # FIX: fallback was mojibake ("β"); restored to "❓" for unknown labels.
    emoji = emoji_map.get(label.lower(), "❓")
    # FIX: round() instead of int() so e.g. 0.876 renders as 88%, not a
    # truncated 87%.
    return f"{emoji} <b>{label.title()}</b> ({round(score * 100)}%)"
|
|
|
|
|
|
|
|
def detect_emotion(mode, text, audio):
    """Classify the selected input and return an HTML result string.

    Args:
        mode: "Text" or "Audio" — which input to classify.
        text: Text to classify (may be empty or None).
        audio: Filepath of the recorded/uploaded audio (may be None).

    Returns:
        HTML string with the top predicted emotion, or a warning message
        when the selected input is missing/empty.
    """
    # FIX: the emoji in the three user-facing strings were mojibake
    # ("π£οΈ", "π§", "β οΈ"); restored to 🗣️ / 🎧 / ⚠️.
    if mode == "Text" and text:
        # Pipelines return a ranked list of predictions; [0] is the top one.
        top = text_emotion_model(text)[0]
        return f"🗣️ <b>Text Emotion:</b><br>{format_emotion(top['label'], top['score'])}"
    if mode == "Audio" and audio:
        top = audio_emotion_model(audio)[0]
        return f"🎧 <b>Audio Emotion:</b><br>{format_emotion(top['label'], top['score'])}"
    return "⚠️ <b>Please provide valid input.</b>"
|
|
|
|
|
|
|
|
def clear_all():
    """Reset the UI: mode back to "Text", both inputs and the output emptied.

    Returns a 4-tuple matching the outputs wired to the Clear button:
    (mode, text_input, audio_input, output).
    """
    default_mode = "Text"
    empty_text = ""
    no_audio = None
    empty_output = ""
    return default_mode, empty_text, no_audio, empty_output
|
|
|
|
|
|
|
|
# --- Gradio UI -------------------------------------------------------------
# FIX: the original UI strings contained mojibake emoji from a mis-decoded
# UTF-8 file; restored to plausible intended emoji (🎭/💬/🎤/🎯/🔍/❌ —
# best-effort reconstruction, confirm against the original source).
with gr.Blocks() as demo:
    gr.Markdown("## 🎭 Emotion Detection App (Text or Audio)")

    # Input-mode selector; drives which input widget is visible below.
    mode = gr.Radio(["Text", "Audio"], label="Choose Input Mode", value="Text")

    # Exactly one of these is visible at a time (see toggle_inputs).
    text_input = gr.Textbox(label="💬 Enter your text", visible=True)
    audio_input = gr.Audio(type="filepath", label="🎤 Record or Upload Audio", visible=False)

    # Rendered as raw HTML because detect_emotion returns markup.
    output = gr.HTML(label="🎯 Output", value="")

    detect_btn = gr.Button("🔍 Detect Emotion")
    clear_btn = gr.Button("❌ Clear Inputs")

    def toggle_inputs(mode):
        """Show the textbox in "Text" mode, the audio widget in "Audio" mode."""
        return (
            gr.update(visible=(mode == "Text")),
            gr.update(visible=(mode == "Audio")),
        )

    mode.change(fn=toggle_inputs, inputs=mode, outputs=[text_input, audio_input])

    detect_btn.click(fn=detect_emotion, inputs=[mode, text_input, audio_input], outputs=output)
    clear_btn.click(fn=clear_all, inputs=[], outputs=[mode, text_input, audio_input, output])

# Blocking call: starts the local web server for the app.
demo.launch()