File size: 1,344 Bytes
d372f3d
 
 
 
 
f3d6de9
d372f3d
 
f3d6de9
d372f3d
 
f3d6de9
 
d372f3d
 
 
 
f3d6de9
d372f3d
f3d6de9
 
d372f3d
f3d6de9
 
 
 
 
d372f3d
 
 
f3d6de9
 
 
 
d372f3d
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import gradio as gr

MODEL_NAME = "bert-base-uncased"
WEIGHTS_PATH = "bert_sentiment_model.pt"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

# ⚠️ Make sure num_labels matches your training setup (2 if you trained with Positive/Negative)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=2)
# weights_only=True restricts unpickling to tensor/primitive data. A state_dict
# contains nothing else, and a full pickle load of an external .pt file is an
# arbitrary-code-execution vector.
model.load_state_dict(torch.load(WEIGHTS_PATH, map_location=DEVICE, weights_only=True))
model.to(DEVICE)
model.eval()

# NOTE(review): 0=Positive is the reverse of the usual convention (0=Negative,
# 1=Positive) — confirm this matches the label encoding used during training.
id2label = {0: "Positive", 1: "Negative"}

def predict_sentiment(text):
    """Classify `text` with the module-level BERT model.

    Returns a string of the form "<label> (<prob> confidence)", where the
    probability is the softmax score of the winning class. Input is
    truncated to 256 tokens.
    """
    encoded = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        padding=True,
        max_length=256,
    ).to(DEVICE)
    # Inference only — no gradient bookkeeping needed.
    with torch.no_grad():
        logits = model(**encoded).logits
        scores = torch.softmax(logits, dim=1)[0]
        label_id = scores.argmax().item()
        top_score = scores[label_id].item()
    return f"{id2label[label_id]} ({top_score:.2f} confidence)"

# Minimal Gradio UI: one free-text box in, one text label out.
demo = gr.Interface(
    fn=predict_sentiment,
    title="Sentiment Analyzer (BERT)",
    description="Predicts whether text sentiment is Positive or Negative.",
    inputs=gr.Textbox(lines=3, placeholder="Type your review here..."),
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()