# app.py — Wisaba's emotion-classification Space ("Add app.py file", commit 4eaafb1)
import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# Load the fine-tuned emotion classifier and its tokenizer from the Hugging Face Hub.
model_name = "Wisaba/emotion_roberta_weighted"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
# Run inference on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Class index -> human-readable label; assumed to match the model's output
# index order from training — TODO confirm against the model card.
emotion_labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
def classify_emotion(text):
    """Classify ``text`` into one of six emotions.

    Args:
        text: Raw input string from the Gradio textbox.

    Returns:
        One of ``emotion_labels`` (e.g. ``"joy"``) — the argmax class of the
        fine-tuned RoBERTa classifier.
    """
    # Tokenize (truncate long inputs) and move tensors to the model's device.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    # Inference only — no gradient bookkeeping needed.
    with torch.no_grad():
        outputs = model(**inputs)
    # Argmax over the logits selects the single most likely class.
    predicted_class_id = torch.argmax(outputs.logits, dim=-1).item()
    return emotion_labels[predicted_class_id]
# Example prompts shown beneath the input box.
example_inputs = [
    ["I am feeling so lonely and sad today."],
    ["I'm incredibly excited about the new project!"],
    ["Why did you do that? I'm so mad at you!"],
]

# Gradio UI: one free-text input, one text output with the predicted emotion.
iface = gr.Interface(
    fn=classify_emotion,
    inputs=gr.Textbox(lines=2, placeholder="Type how you feel...", label="Text Input"),
    outputs=gr.Textbox(label="Predicted Emotion"),
    title="Emotion Analysis (RoBERTa)",
    description="This model classifies text into 6 emotions: Sadness, Joy, Love, Anger, Fear, Surprise.",
    examples=example_inputs,
)
# Launch the web app only when executed as a script (not on import).
if __name__ == "__main__":
    iface.launch()