|
|
import gradio as gr |
|
|
from transformers import AutoTokenizer, AutoModelForSequenceClassification |
|
|
import torch |
|
|
import numpy as np |
|
|
|
|
|
|
|
|
# Hugging Face Hub repository id of the fine-tuned BERT emotion classifier.
model_name = "Aakash22134/bert-twitter-sentiment-classifier"

# Download (or load from the local HF cache) the tokenizer and model weights
# once at import time so every prediction call reuses the same objects.
print("Loading model...")

tokenizer = AutoTokenizer.from_pretrained(model_name)

# from_pretrained returns the model in eval mode, so no explicit .eval() is
# needed for inference.
model = AutoModelForSequenceClassification.from_pretrained(model_name)

print("Model loaded successfully!")
|
|
|
|
|
|
|
|
# Mapping from model output class index to a human-readable display label.
# The index order follows the dair-ai/emotion dataset convention the model
# was fine-tuned on (sadness, joy, love, anger, fear, surprise).
# NOTE(review): the original literals were mojibake (UTF-8 emoji bytes
# decoded with a legacy codepage, e.g. "π’", "β€οΈ"); restored to the
# intended emoji here.
id2label = {
    0: "Sadness 😢",
    1: "Joy 😊",
    2: "Love ❤️",
    3: "Anger 😠",
    4: "Fear 😨",
    5: "Surprise 😲",
}
|
|
|
|
|
def predict_emotion(text):
    """Predict the emotion expressed in ``text``.

    Args:
        text: Raw string from the Gradio textbox. May be ``None`` or
            whitespace-only when the user submits nothing.

    Returns:
        dict[str, float]: Each human-readable emotion label from
        ``id2label`` mapped to its softmax probability — the format
        ``gr.Label`` expects. Blank input yields all-zero scores.
    """
    # Guard against None as well as whitespace-only input; the Gradio
    # Textbox can hand the callback None, which would crash .strip().
    if not text or not text.strip():
        return {label: 0.0 for label in id2label.values()}

    # Tokenize, truncating at BERT's 512-token context limit.
    inputs = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        max_length=512,
        padding=True,
    )

    # Inference only — skip gradient tracking.
    with torch.no_grad():
        outputs = model(**inputs)
        # Convert logits to a probability distribution over the 6 classes.
        predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
        probabilities = predictions[0].numpy()

    # Pair each class index's label with its probability as a plain float.
    return {id2label[i]: float(probabilities[i]) for i in range(len(probabilities))}
|
|
|
|
|
|
|
|
# Sample inputs shown beneath the interface. gr.Interface expects one
# inner list per example, each holding a value for the single Textbox
# input — hence the wrapping comprehension.
_example_texts = (
    "I just got promoted at work! This is the best day ever!",
    "I can't believe they cancelled my favorite show. I'm so upset right now.",
    "Missing you so much. Can't wait to see you again.",
    "Why does everything always go wrong for me? This is so frustrating!",
    "I'm really worried about the test tomorrow. What if I fail?",
    "Oh wow! I never expected this to happen!",
)
examples = [[text] for text in _example_texts]
|
|
|
|
|
|
|
|
# Build the Gradio UI: one multi-line textbox in, a per-class probability
# chart out, wired to predict_emotion.
demo = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Textbox(
        label="Enter your text",
        placeholder="Type something here...",
        lines=3
    ),
    # Label component renders all 6 class probabilities as ranked bars.
    outputs=gr.Label(label="Emotion Prediction", num_top_classes=6),
    # NOTE(review): "π" looks like a mojibake emoji (encoding damage in this
    # file — see the id2label literals); confirm the intended character.
    title="π Twitter Sentiment Classifier",
    description="""
    This AI model analyzes text and predicts the underlying emotion.
    It can detect 6 different emotions: **Sadness, Joy, Love, Anger, Fear, and Surprise**.

    **How to use:** Simply type or paste any text and the model will predict the emotion!
    """,
    examples=examples,
    theme=gr.themes.Soft(),
    # Markdown rendered below the interface.
    article="""
    ### About This Model
    - **Base Model**: BERT (bert-base-uncased)
    - **Task**: Multi-class emotion classification
    - **Test Accuracy**: 90.06%
    - **Training Data**: 16,000 emotion-labeled tweets
    - **Classes**: Sadness, Joy, Love, Anger, Fear, Surprise

    ### Performance Metrics
    | Emotion | Precision | Recall | F1-Score |
    |---------|-----------|--------|----------|
    | Sadness | 0.93 | 0.95 | 0.94 |
    | Joy | 0.92 | 0.91 | 0.92 |
    | Love | 0.76 | 0.75 | 0.76 |
    | Anger | 0.91 | 0.91 | 0.91 |
    | Fear | 0.89 | 0.88 | 0.88 |
    | Surprise | 0.75 | 0.72 | 0.74 |

    ### Limitations
    - Works best with English text
    - Trained on Twitter/social media style text
    - May not perform as well on formal or technical text
    - Shorter texts (like tweets) work better than very long texts
    """
)
|
|
|
|
|
# Start the local Gradio server only when run as a script (not when the
# module is imported, e.g. by Hugging Face Spaces tooling).
if __name__ == "__main__":
    demo.launch()