Spaces:
Sleeping
Sleeping
File size: 1,785 Bytes
31f733b 3e85e07 e3e8e9d 2781600 9a30469 e3e8e9d 683e053 31f733b 2234d33 31f733b 2781600 31f733b 1a10e36 31f733b 69476a9 31f733b 72a00a0 31f733b 812dc31 31f733b c6614b4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 |
# import part
import streamlit as st
from transformers import pipeline, AutoTokenizer
import numpy as np
import random
import torch

# Set random seeds for reproducibility across all RNG sources used
# by transformers/torch (Python, NumPy, CPU torch, and CUDA if present).
seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
    # Seed every visible CUDA device, not just the current one.
    torch.cuda.manual_seed_all(seed)

# Load the tokenizer for the sentiment analysis model.
# NOTE(review): tokenizer comes from the nlptown base model while the
# classifier weights come from a fine-tuned checkpoint — assumes the
# fine-tune kept the same vocabulary; confirm with the model card.
tokenizer = AutoTokenizer.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")

# Initialize sentiment analysis pipeline with the custom model and tokenizer.
sentiment_pipeline = pipeline("sentiment-analysis", model="EmmaL1/CustomModel_amazon", tokenizer=tokenizer)

# Initialize question-answering pipeline (used to extract the "reason"
# for the predicted rating from the user's own text).
qa_pipeline = pipeline("question-answering", model="distilbert/distilbert-base-cased-distilled-squad")
# function part
# text classification
def textclassification():
    """Render the Streamlit UI: classify a review's star rating, then use
    extractive QA on the same text to surface the reason for the rating.

    Reads user text from a Streamlit text input; writes results to the page.
    Returns None (Streamlit apps communicate via widgets, not return values).
    """
    st.title("Amazon Customer Sentiment Analysis: Ratings and Reasons")
    st.write("Enter a sentence to analyze its rating and reason:")
    user_input = st.text_input("Input your text:")

    # Streamlit reruns the script on every interaction; only analyze
    # once the user has actually typed something (empty string is falsy).
    if user_input:
        # Sentiment Analysis: pipeline returns a list of dicts like
        # [{"label": ..., "score": ...}] — one entry per input.
        sentiment_result = sentiment_pipeline(user_input)
        sentiment = sentiment_result[0]["label"]
        confidence = sentiment_result[0]["score"]
        st.write(f"Sentiment: {sentiment}")
        st.write(f"Confidence: {confidence:.2f}")

        # Display the rating (the model's label doubles as the star rating).
        st.write(f"The rating is {sentiment}")

        # Question Answering: ask *why* the rating is what it is, using the
        # user's own review text as the context to extract the answer from.
        qa_input = {
            'question': f'Why is the rating {sentiment} star?',
            'context': user_input  # Use user input as context
        }
        qa_result = qa_pipeline(qa_input)
        st.write(f"Reasons: {qa_result['answer']}")
def main():
    """App entry point: delegate to the single Streamlit page."""
    textclassification()


# Standard script guard so importing this module has no UI side effects.
if __name__ == "__main__":
    main()