|
|
import gradio as gr |
|
|
from transformers import AutoTokenizer, AutoModelForSequenceClassification |
|
|
import torch |
|
|
|
|
|
# Hugging Face model id: a 5-class multilingual sentiment classifier.
MODEL = "tabularisai/multilingual-sentiment-analysis"

# Load tokenizer and model once at import time (downloaded on first run,
# then served from the local Hugging Face cache).
tokenizer = AutoTokenizer.from_pretrained(MODEL)

model = AutoModelForSequenceClassification.from_pretrained(MODEL)

# Prefer GPU when available; fall back to CPU otherwise.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model.to(device)

# Mapping: class index -> human-readable label (e.g. "Positive"),
# taken directly from the model's config.
labels = model.config.id2label
|
|
|
|
|
def analyze(text):
    """Run sentiment analysis on ``text`` and recommend a bot action.

    Parameters
    ----------
    text : str
        Input text (English or Russian, per the UI placeholder).

    Returns
    -------
    dict
        ``label`` (top class name), ``score`` (its probability, rounded),
        ``all_scores`` (per-class probabilities), ``positive_strength`` /
        ``negative_strength`` (summed positive / negative probability mass),
        and ``bot_action`` (``"send_positive_sticker"``,
        ``"send_negative_sticker"`` or ``"none"``).  On failure the dict
        carries ``label == "Error"`` plus an ``error`` message instead of
        raising, so the Gradio UI always receives JSON.
    """
    # Empty / whitespace-only input: short-circuit with a confident
    # "Neutral" result instead of invoking the model.
    # NOTE(review): these five class names assume the model's id2label uses
    # exactly these labels — confirm against model.config.id2label.
    if not text or not text.strip():
        return {
            "label": "Neutral",
            "score": 1.0,
            "all_scores": {
                "Very Negative": 0.0,
                "Negative": 0.0,
                "Neutral": 1.0,
                "Positive": 0.0,
                "Very Positive": 0.0,
            },
            "positive_strength": 0.0,
            "negative_strength": 0.0,
            "bot_action": "none",
        }

    try:
        # Keep the try narrow: only tokenization and inference can
        # realistically raise; the dict assembly below is deterministic.
        enc = tokenizer(
            text,
            return_tensors="pt",
            truncation=True,
            padding=True,
            max_length=128,
        ).to(device)
        with torch.no_grad():
            out = model(**enc)
        scores = torch.softmax(out.logits[0], dim=-1).cpu().tolist()
    except Exception as e:  # boundary handler: surface the failure in the JSON
        return {
            "label": "Error",
            "score": 0.0,
            "all_scores": {"Error": 1.0},
            "positive_strength": 0.0,
            "negative_strength": 0.0,
            "bot_action": "none",
            "error": str(e),
        }

    all_scores = {labels[i]: round(s, 3) for i, s in enumerate(scores)}

    # Argmax over the probabilities — equivalent to argmax over the logits
    # (softmax is monotonic), without a second pass over the tensor.
    best_idx = max(range(len(scores)), key=scores.__getitem__)

    positive_strength = all_scores.get("Positive", 0) + all_scores.get("Very Positive", 0)
    negative_strength = all_scores.get("Negative", 0) + all_scores.get("Very Negative", 0)

    # Only recommend a sticker when the sentiment mass is decisively
    # one-sided; 0.765 was presumably tuned empirically — TODO confirm.
    threshold = 0.765
    if positive_strength > threshold:
        bot_action = "send_positive_sticker"
    elif negative_strength > threshold:
        bot_action = "send_negative_sticker"
    else:
        bot_action = "none"

    return {
        "label": labels[best_idx],
        "score": round(scores[best_idx], 3),
        "all_scores": all_scores,
        "positive_strength": round(positive_strength, 3),
        "negative_strength": round(negative_strength, 3),
        "bot_action": bot_action,
    }
|
|
|
|
|
|
|
|
# Gradio UI: a single textbox in, a JSON payload out.  The JSON mirrors the
# dict returned by analyze() (label, score, all_scores, strengths, bot_action).
demo = gr.Interface(
    fn=analyze,
    inputs=gr.Textbox(
        lines=2,
        placeholder="Text in English or Russian...",
        label="Input Text"
    ),
    outputs=gr.JSON(
        label="Sentiment Analysis Result"
    ),
    title="Bilingual Sentiment Analysis",
    description="Detailed sentiment analysis with bot action recommendations",
    # Clickable example inputs shown beneath the interface.
    examples=[
        ["I love this product!"],
        ["This is terrible, I hate it."],
        ["It's okay, nothing special."],
        ["Привет, как дела?"],
        ["Это ужасно, я в ярости!"]
    ],
    # Endpoint name exposed via the Gradio REST/client API.
    api_name="predict"
)
|
|
|
|
|
if __name__ == "__main__":
    # Enable request queueing so concurrent users are serialized through
    # the single loaded model instead of racing on it.
    demo.queue()
    demo.launch(
        server_name="0.0.0.0",  # bind all interfaces (container/remote access)
        server_port=7860,       # Gradio's conventional default port
        show_api=True,          # expose the API docs page
        share=False             # no public gradio.live tunnel
    )