|
|
|
|
| from transformers import BertTokenizer, TFBertForSequenceClassification
|
| import os
|
|
|
|
|
| import tensorflow as tf
|
| from transformers import BertTokenizer, TFBertForSequenceClassification
|
| import pandas as pd
|
|
|
| import re
|
|
|
|
|
|
|
|
|
|
|
| from sklearn.model_selection import train_test_split
|
| from sklearn.metrics import classification_report
|
| import streamlit as st
|
|
|
|
|
# Directory containing the saved tokenizer and fine-tuned model.
# NOTE(review): 'path-to-save' looks like a placeholder — replace with the
# real save location before deploying.
path = 'path-to-save'

# Load the artifacts once at module import so every request reuses them.
# Use os.path.join instead of string concatenation for portable paths.
bert_tokenizer = BertTokenizer.from_pretrained(os.path.join(path, 'Tokenizer'))

bert_model = TFBertForSequenceClassification.from_pretrained(os.path.join(path, 'Model'))
|
|
|
|
|
def Get_sentiment(Review, Tokenizer=bert_tokenizer, Model=bert_model):
    """Classify the sentiment of one or more review texts.

    Parameters
    ----------
    Review : str or list of str
        A single review or a batch of reviews.
    Tokenizer : BertTokenizer, optional
        Tokenizer used to encode the text (defaults to the module-level one).
    Model : TFBertForSequenceClassification, optional
        Fine-tuned classifier (defaults to the module-level one).

    Returns
    -------
    list of str
        One label per input review: 'Positive' or 'Negative'.
    """
    # Normalize a single string into a one-element batch.
    if not isinstance(Review, list):
        Review = [Review]

    # Encode the batch; access the fields by key rather than unpacking
    # .values(), whose ordering is fragile across tokenizer configurations.
    encoded = Tokenizer.batch_encode_plus(
        Review,
        padding=True,
        truncation=True,
        max_length=128,
        return_tensors='tf',
    )
    prediction = Model.predict(
        [encoded['input_ids'], encoded['token_type_ids'], encoded['attention_mask']]
    )

    # The class with the highest logit is the predicted class index.
    pred_labels = tf.argmax(prediction.logits, axis=1)

    # Map class index -> human-readable label (consistent capitalization).
    label = {
        1: 'Positive',
        0: 'Negative',
    }

    return [label[i] for i in pred_labels.numpy().tolist()]
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Streamlit UI ---
# Collect free-form text from the user.
Review = st.text_area('Input your text')

# Only run inference once the user has entered something.
if Review:
    sentiment_labels = Get_sentiment(Review)
    # Show the predicted label(s) alongside the original text.
    # (Removed a leftover debug print() that duplicated this output on stdout.)
    st.write(sentiment_labels, Review)
|
|
|
|
|