# import part
import random

import numpy as np
import streamlit as st
import torch
from transformers import AutoTokenizer, pipeline

# Set random seed for reproducibility so repeated runs give identical results.
SEED = 42
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(SEED)


@st.cache_resource
def _load_pipelines():
    """Load and cache the Hugging Face pipelines once per Streamlit process.

    Streamlit re-executes the whole script on every widget interaction;
    without st.cache_resource both models would be re-loaded on each rerun,
    which is slow and memory-hungry. Caching fixes that.

    Returns:
        tuple: (sentiment_pipeline, qa_pipeline) ready-to-call pipelines.
    """
    # Tokenizer comes from the base multilingual sentiment model; the
    # fine-tuned classifier weights come from EmmaL1/CustomModel_amazon.
    tokenizer = AutoTokenizer.from_pretrained(
        "nlptown/bert-base-multilingual-uncased-sentiment"
    )
    # Sentiment-analysis pipeline with the custom model and tokenizer.
    sentiment = pipeline(
        "sentiment-analysis",
        model="EmmaL1/CustomModel_amazon",
        tokenizer=tokenizer,
    )
    # Extractive question-answering pipeline.
    qa = pipeline(
        "question-answering",
        model="distilbert/distilbert-base-cased-distilled-squad",
    )
    return sentiment, qa


# Keep the original module-level names so any external code that imports
# `sentiment_pipeline` / `qa_pipeline` from this module keeps working.
sentiment_pipeline, qa_pipeline = _load_pipelines()


# function part
# text classification
def textclassification():
    """Render the app page: predict a review's star rating and extract a reason.

    Reads free text from a Streamlit text input, runs the sentiment pipeline
    to get a star-rating label plus confidence, then runs extractive QA over
    the same text to surface a supporting phrase ("reason").
    """
    st.title("Amazon Customer Sentiment Analysis: Ratings and Reasons")
    st.write("Enter a sentence to analyze its rating and reason:")
    user_input = st.text_input("Input your text:")

    # Empty string is falsy, so nothing runs until the user types something.
    if user_input:
        # Sentiment Analysis: pipeline returns [{"label": ..., "score": ...}].
        sentiment_result = sentiment_pipeline(user_input)
        sentiment = sentiment_result[0]["label"]
        confidence = sentiment_result[0]["score"]
        st.write(f"Sentiment: {sentiment}")
        st.write(f"Confidence: {confidence:.2f}")

        # Display the rating (the label doubles as the star rating).
        st.write(f"The rating is {sentiment}")

        # Question Answering: ask why the review earned this rating, using
        # the review text itself as the context to extract an answer from.
        qa_input = {
            'question': f'Why is the rating {sentiment} star?',
            'context': user_input  # Use user input as context
        }
        qa_result = qa_pipeline(qa_input)
        st.write(f"Reasons: {qa_result['answer']}")


def main():
    """Entry point: run the single-page classification app."""
    textclassification()


if __name__ == "__main__":
    main()