|
|
import torch |
|
|
from transformers import AutoTokenizer, AutoModelForSequenceClassification |
|
|
import gradio as gr |
|
|
|
|
|
# Hugging Face model id used for both the tokenizer and the architecture.
MODEL_NAME = "bert-base-uncased"


# Path to the fine-tuned weights (a state_dict saved with torch.save).
WEIGHTS_PATH = "bert_sentiment_model.pt"


# Prefer GPU when available; everything below is moved to this device.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
|
|
|
|
|
# Tokenizer must come from the same checkpoint the weights were fine-tuned on.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

# Binary classification head (num_labels=2) on top of the base BERT encoder.
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=2)

# weights_only=True restricts torch.load to tensor/container data, closing the
# arbitrary-code-execution hole of unpickling an untrusted checkpoint — a plain
# state_dict needs nothing more. map_location lets CPU-only hosts load
# GPU-saved weights.
model.load_state_dict(torch.load(WEIGHTS_PATH, map_location=DEVICE, weights_only=True))

model.to(DEVICE)

# Inference only: switch off dropout / batch-norm training behavior.
model.eval()
|
|
|
|
|
# Maps predicted class index -> human-readable label.
# NOTE(review): 0 = Positive / 1 = Negative is the reverse of the common
# convention — confirm it matches the label order used during fine-tuning.
id2label = {0: "Positive", 1: "Negative"}
|
|
|
|
|
def predict_sentiment(text):
    """Classify *text* with the fine-tuned BERT model.

    Returns a string of the form "<label> (<score> confidence)", where the
    label comes from the module-level ``id2label`` mapping.
    """
    # Tokenize into a single-example batch, truncated to 256 sub-word tokens,
    # and move the tensors to the same device as the model.
    encoded = tokenizer(
        text, return_tensors="pt", truncation=True, padding=True, max_length=256
    ).to(DEVICE)

    # Forward pass without gradient bookkeeping — inference only.
    with torch.no_grad():
        logits = model(**encoded).logits

    # Normalize logits to class probabilities, then take the argmax class
    # and its probability as the reported confidence.
    probabilities = torch.softmax(logits, dim=1)
    predicted_class = int(probabilities.argmax(dim=1))
    score = float(probabilities[0, predicted_class])

    return f"{id2label[predicted_class]} ({score:.2f} confidence)"
|
|
|
|
|
# Minimal Gradio UI: one free-text input wired to the prediction function.
_review_input = gr.Textbox(lines=3, placeholder="Type your review here...")

demo = gr.Interface(
    fn=predict_sentiment,
    inputs=_review_input,
    outputs="text",
    title="Sentiment Analyzer (BERT)",
    description="Predicts whether text sentiment is Positive or Negative.",
)
|
|
|
|
|
if __name__ == "__main__":
    # Start the Gradio server only when run as a script, not on import.
    demo.launch()