import torch
from transformers import BertTokenizer

from model.emotion_model import Rosa

# The 28 emotion categories of the GoEmotions taxonomy (27 emotions plus "neutral").
emotion_labels = [
    "admiration", "amusement", "anger", "annoyance", "approval", "caring", "confusion", "curiosity",
    "desire", "disappointment", "disapproval", "disgust", "embarrassment", "excitement", "fear",
    "gratitude", "grief", "joy", "love", "nervousness", "optimism", "pride", "realization", "relief",
    "remorse", "sadness", "surprise", "neutral",
]

# Load the tokenizer, rebuild the model, then restore the trained weights.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = Rosa(model_name="bert-base-uncased", num_emotions=28, emotion_labels=emotion_labels)
model.load_state_dict(torch.load("rosa.pt", map_location=torch.device("cpu")))
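# Note (an optional hardening step, not in the original script): recent PyTorch
# releases accept torch.load(..., weights_only=True), which refuses to unpickle
# arbitrary objects from the checkpoint file; worth enabling if your version
# supports it.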
model.eval()  # switch off dropout so inference is deterministic


def predict(text: str):
    """Score `text` against all 28 emotions; returns (label, probability) pairs."""
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        output = model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
    # Multi-label head: sigmoid gives an independent probability per emotion,
    # so the scores are not expected to sum to 1.
    probs = torch.sigmoid(output["logits"]).squeeze()
    return list(zip(emotion_labels, probs.tolist()))
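

# A small convenience sketch, not part of the original script: rank predict()'s
# scores so callers can read off the strongest emotions. The name top_emotions
# and the top_k parameter are hypothetical additions for illustration.
def top_emotions(text: str, top_k: int = 5):
    scores = predict(text)
    return sorted(scores, key=lambda pair: pair[1], reverse=True)[:top_k]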
if __name__ == "__main__":
    test_text = "My heart is filled with longing and beauty."
    results = predict(test_text)

    print("\n🌹 Rosa's Emotional Reading:\n")
    for emotion, score in results:
        print(f"  → {emotion}: {score:.4f}")
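    # Hypothetical follow-up (not in the original script): also report the
    # strongest few emotions, using the top_emotions sketch defined above.
    print("\n  Top 3 emotions:")
    for emotion, score in top_emotions(test_text, top_k=3):
        print(f"    {emotion}: {score:.4f}")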