# Hugging Face Space: emotion classification demo (commit 4eaafb1)
import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# Load the fine-tuned emotion-classification model from the Hugging Face Hub.
# NOTE(review): this runs at import time and may download weights on first use.
model_name = "Wisaba/emotion_roberta_weighted"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
# Move the model to GPU if one is available; inputs are moved to the same
# device inside classify_emotion.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Human-readable labels, indexed by the model's output class id.
# assumes the checkpoint was trained with exactly this 6-class ordering — TODO confirm
emotion_labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
def classify_emotion(text):
    """Classify ``text`` into one of six emotions.

    Args:
        text: Input string to classify.

    Returns:
        One of the strings in ``emotion_labels`` ("sadness", "joy", "love",
        "anger", "fear", "surprise") — the argmax class of the model's logits.
    """
    # Tokenize and move tensors to the same device as the model.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    # Inference only — no gradients needed.
    with torch.no_grad():
        outputs = model(**inputs)
    # Map the highest-scoring logit to its label.
    predicted_class_id = torch.argmax(outputs.logits, dim=-1).item()
    return emotion_labels[predicted_class_id]
# Build the Gradio UI: a two-line text box in, the predicted emotion out.
demo_inputs = gr.Textbox(
    lines=2,
    placeholder="Type how you feel...",
    label="Text Input",
)
demo_outputs = gr.Textbox(label="Predicted Emotion")

iface = gr.Interface(
    fn=classify_emotion,
    inputs=demo_inputs,
    outputs=demo_outputs,
    title="Emotion Analysis (RoBERTa)",
    description=(
        "This model classifies text into 6 emotions: "
        "Sadness, Joy, Love, Anger, Fear, Surprise."
    ),
    examples=[
        ["I am feeling so lonely and sad today."],
        ["I'm incredibly excited about the new project!"],
        ["Why did you do that? I'm so mad at you!"],
    ],
)

# Start the app only when this file is executed directly.
if __name__ == "__main__":
    iface.launch()