# Hugging Face Space: emotion classification demo
# (original page-status text: "Spaces: Sleeping")
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Fine-tuned emotion-classification checkpoint hosted on the Hugging Face Hub.
model_name = "macapa/emotion-classifier"

# NOTE(review): the tokenizer is loaded from the base checkpoint rather than
# `model_name` — presumably the fine-tune kept distilbert's vocabulary; confirm
# the repo does not ship its own tokenizer files.
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Run on GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()  # inference-only app: disable dropout and other train-mode behavior

# Mapping from the model's output class index to a human-readable emotion name.
labels = {
    0: 'sadness',
    1: 'joy',
    2: 'love',
    3: 'anger',
    4: 'fear',
    5: 'surprise',
}
def predict(text):
    """Classify the emotion expressed in *text*.

    Args:
        text: Free-form input string entered in the UI.

    Returns:
        The predicted emotion name, one of the values in ``labels``.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    inputs = inputs.to(device)
    # Inference only — disable autograd so no computation graph is built
    # (saves memory and time on every request).
    with torch.no_grad():
        outputs = model(**inputs)
    prediction = torch.argmax(outputs.logits, dim=-1)
    return labels[prediction.item()]
# Wire the classifier into a simple Gradio web UI and start serving it.
text_input = gr.Textbox(lines=2, placeholder="Enter text here...")

iface = gr.Interface(
    fn=predict,
    inputs=text_input,
    outputs="textbox",
    title="Emotion Classification",
    description="Enter some text and the model will predict the emotion",
)

iface.launch()