miasambolec committed on
Commit
1d5333f
·
verified ·
1 Parent(s): b0bcb4b

Create demo.py

Browse files
Files changed (1) hide show
  1. demo.py +113 -0
demo.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
3
+
4
+ from huggingface_hub import hf_hub_download
5
+ import gradio as gr
6
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
7
+ import torch
8
+ import pickle
9
+ import numpy as np
10
+ from tensorflow.keras.models import load_model
11
+ from tensorflow.keras.preprocessing.sequence import pad_sequences
12
+ import re
13
+
14
+ # Load SVM model and vectorizer
15
+ svm_repo_id = "your-username/svm-sentiment-model" # Replace with your actual repo
16
+ svm_model_path = hf_hub_download(repo_id=svm_repo_id, filename="svm_model.pkl")
17
+ with open(svm_model_path, "rb") as f:
18
+ svm_model = pickle.load(f)
19
+ vectorizer_path = hf_hub_download(repo_id=svm_repo_id, filename="vectorizer.pkl")
20
+ with open(vectorizer_path, "rb") as f:
21
+ vectorizer = pickle.load(f)
22
+
23
+ # Load LSTM model and tokenizer
24
+ lstm_repo_id = "arjahojnik/LSTM-sentiment-model"
25
+ lstm_model_path = hf_hub_download(repo_id=lstm_repo_id, filename="LSTM_model.h5")
26
+ lstm_model = load_model(lstm_model_path)
27
+ lstm_tokenizer_path = hf_hub_download(repo_id=lstm_repo_id, filename="my_tokenizer.pkl")
28
+ with open(lstm_tokenizer_path, "rb") as f:
29
+ lstm_tokenizer = pickle.load(f)
30
+
31
+ # Load BERT model and tokenizer
32
+ bert_tokenizer = AutoTokenizer.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
33
+ bert_model = AutoModelForSequenceClassification.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
34
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
35
+ bert_model.to(device)
36
+
37
+ def preprocess_text(text):
38
+ text = text.lower()
39
+ text = re.sub(r"[^a-zA-Z\s]", "", text).strip()
40
+ return text
41
+
42
+ def predict_with_svm(text):
43
+ transformed = vectorizer.transform([text])
44
+ prediction = svm_model.predict(transformed)
45
+ return int(prediction[0])
46
+
47
+ def predict_with_lstm(text):
48
+ cleaned = preprocess_text(text)
49
+ seq = lstm_tokenizer.texts_to_sequences([cleaned])
50
+ padded_seq = pad_sequences(seq, maxlen=200)
51
+ probs = lstm_model.predict(padded_seq)
52
+ predicted_class = np.argmax(probs, axis=1)[0]
53
+ return int(predicted_class + 1)
54
+
55
+ def predict_with_bert(text):
56
+ inputs = bert_tokenizer([text], padding=True, truncation=True, max_length=512, return_tensors="pt").to(device)
57
+ with torch.no_grad():
58
+ outputs = bert_model(**inputs)
59
+ logits = outputs.logits
60
+ predictions = logits.argmax(axis=-1).cpu().numpy()
61
+ return int(predictions[0] + 1)
62
+
63
+ def analyze_sentiment(text):
64
+ results = {
65
+ "SVM": predict_with_svm(text),
66
+ "LSTM": predict_with_lstm(text),
67
+ "BERT": predict_with_bert(text)
68
+ }
69
+ scores = list(results.values())
70
+ average = np.mean(scores)
71
+ stats = f"Average Score: {average:.2f}\n"
72
+ return (
73
+ convert_to_stars(results["SVM"]),
74
+ convert_to_stars(results["LSTM"]),
75
+ convert_to_stars(results["BERT"]),
76
+ stats
77
+ )
78
+
79
+ def convert_to_stars(score):
80
+ return "★" * score + "☆" * (5 - score)
81
+
82
+ def process_input(text):
83
+ if not text.strip():
84
+ return ("", "", "", "Please enter valid text.")
85
+ return analyze_sentiment(text)
86
+
87
+ with gr.Blocks() as demo:
88
+ gr.Markdown("# Sentiment Analysis Demo")
89
+ gr.Markdown("""
90
+ Enter a review and see how different models evaluate its sentiment! This app uses:
91
+ - SVM for classic machine learning
92
+ - LSTM for deep learning
93
+ - BERT for transformer-based analysis
94
+ """)
95
+
96
+ with gr.Row():
97
+ with gr.Column():
98
+ input_text = gr.Textbox(label="Enter your review:", lines=3)
99
+ analyze_button = gr.Button("Analyze Sentiment")
100
+
101
+ with gr.Column():
102
+ svm_output = gr.Textbox(label="SVM", interactive=False)
103
+ lstm_output = gr.Textbox(label="LSTM", interactive=False)
104
+ bert_output = gr.Textbox(label="BERT", interactive=False)
105
+ stats_output = gr.Textbox(label="Statistics", interactive=False)
106
+
107
+ analyze_button.click(
108
+ process_input,
109
+ inputs=[input_text],
110
+ outputs=[svm_output, lstm_output, bert_output, stats_output]
111
+ )
112
+
113
+ demo.launch()