app.py
import gradio as gr
import numpy as np
import plotly.express as px
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
MODEL_NAME = "cardiffnlp/twitter-xlm-roberta-base-sentiment"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME).to(device)
model.eval()
def predict_sentiment(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.softmax(outputs.logits, dim=-1)[0].cpu().numpy()
    pred = int(np.argmax(probs))
    # This checkpoint is a 3-class model: index 0 = negative, 1 = neutral, 2 = positive,
    # so the label list must match the three output logits.
    class_names = ["Negative", "Neutral", "Positive"]
    emojis = ["❌", "😐", "✅"]
    label = f"{emojis[pred]} {class_names[pred]}"
    confidence = f"{probs[pred]:.1%}"
    fig = px.bar(x=class_names, y=probs, title=f"Sentiment: {label}")
    fig.update_yaxes(range=[0, 1])
    return label, confidence, fig
def chat_response(message, history):
    if not message.strip():
        # Every output component needs a value, including the plot.
        return "", history, None
    label, conf, plot = predict_sentiment(message)
    bot_message = f"**Sentiment:** {label}\n**Confidence:** {conf}"
    # gr.Chatbot(type="messages") expects role/content dicts.
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": bot_message})
    return "", history, plot
with gr.Blocks(title="Sentiment Chatbot") as demo:
    gr.Markdown("# 🗣️ Sentiment Chatbot (EN/AR)")
    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(type="messages", height=500)
            msg_input = gr.Textbox(placeholder="اكتب بالعربية أو الإنجليزية...")
        with gr.Column(scale=1):
            sentiment_plot = gr.Plot(label="📊 Confidence")
    msg_input.submit(chat_response, [msg_input, chatbot], [msg_input, chatbot, sentiment_plot])

if __name__ == "__main__":
    demo.launch()
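
With the __main__ guard in place, app.py imports cleanly, so the classifier can be smoke-tested without launching the UI. A minimal sketch (the file name smoke_test.py and the sample strings are illustrative, not part of the Space):

# smoke_test.py — quick sanity check of predict_sentiment (hypothetical helper)
from app import predict_sentiment

for text in ["I love this!", "هذا سيئ جدا"]:  # one English and one Arabic sample ("this is very bad")
    label, conf, _ = predict_sentiment(text)  # discard the plotly figure
    print(f"{text!r} -> {label} ({conf})")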
- requirements.txt +0 -1

@@ -1,4 +1,3 @@
-
 gradio==4.44.0
 torch
 transformers
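
Note that app.py also imports numpy and plotly.express; numpy usually arrives transitively with gradio and torch, but plotly typically does not, and the import at the top of app.py will fail at startup if it is missing. A fuller requirements.txt might look like this (the two unpinned additions are an assumption):

gradio==4.44.0
torch
transformers
plotly
numpy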