import streamlit as st
from transformers import BertTokenizer, BertForSequenceClassification
import torch
# Load the fine-tuned model and tokenizer
@st.cache_resource
def load_model():
    """Fetch the fine-tuned ABSA classifier and its tokenizer from the Hub.

    Cached by Streamlit so the download/initialization happens once per
    server process, not on every script rerun.

    Returns:
        tuple: (model, tokenizer) ready for inference.
    """
    repo_id = 'nik20004/absa-sentiment-app'  # Hub repo holding the saved weights
    tokenizer = BertTokenizer.from_pretrained(repo_id)
    model = BertForSequenceClassification.from_pretrained(repo_id)
    return model, tokenizer
# Materialize the cached model/tokenizer once at app start-up.
model, tokenizer = load_model()
# Run on GPU when available; otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
# Streamlit app title
st.title("Aspect-Based Sentiment Analysis Web App")
# Input fields for sentence and aspect (both required by predict_sentiment below).
sentence = st.text_area("Enter the review or sentence:")
aspect = st.text_input("Enter the aspect to analyze (e.g., 'food', 'service'):")
# Function to predict sentiment
def predict_sentiment(sentence, aspect):
    """Classify the sentiment expressed toward `aspect` in `sentence`.

    Encodes the (aspect, sentence) pair as a BERT sentence pair and runs
    the fine-tuned classifier loaded at module level.

    Args:
        sentence: The review text to analyze.
        aspect: The aspect term to evaluate (e.g. 'food', 'service').

    Returns:
        str: "Negative", "Neutral", or "Positive", or an instruction
        string when either input is empty.
    """
    # Guard clause: both fields are required (empty strings are falsy).
    if not sentence or not aspect:
        return "Please enter both a sentence and an aspect."
    # Tokenize as a sentence pair. Calling the tokenizer directly replaces
    # the deprecated `encode_plus` API and yields the same
    # input_ids/attention_mask tensors.
    inputs = tokenizer(
        aspect,
        sentence,
        add_special_tokens=True,
        max_length=128,
        padding='max_length',
        truncation=True,
        return_tensors='pt',
    )
    # Move tensors to the same device as the model.
    input_ids = inputs['input_ids'].to(device)
    attention_mask = inputs['attention_mask'].to(device)
    # Inference only: disable dropout and gradient tracking.
    model.eval()
    with torch.no_grad():
        outputs = model(input_ids, attention_mask=attention_mask)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=1).item()
    # Map class index to sentiment label (model was fine-tuned on 3 classes).
    label_map = {0: "Negative", 1: "Neutral", 2: "Positive"}
    return label_map[predicted_class]
# Button for prediction: runs inference on click and renders the result.
if st.button("Analyze Sentiment"):
    result = predict_sentiment(sentence, aspect)
    st.subheader(f"Aspect: {aspect}")
    st.write(f"Predicted Sentiment: **{result}**")