miasambolec committed on
Commit
fd63207
·
verified ·
1 Parent(s): f648418

Delete demo.py

Browse files
Files changed (1) hide show
  1. demo.py +0 -120
demo.py DELETED
@@ -1,120 +0,0 @@
1
- import os
2
- os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
3
-
4
- from huggingface_hub import hf_hub_download
5
- import gradio as gr
6
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
7
- import torch
8
-
9
- import pickle
10
- import numpy as np
11
- from tensorflow.keras.models import load_model
12
- from tensorflow.keras.preprocessing.sequence import pad_sequences
13
- import re
14
-
15
- svm_repo_id = "HighFive-OPJ/Deep_Learning"
16
- svm_model_path = hf_hub_download(repo_id=svm_repo_id, filename="svm_model.pkl")
17
- with open(svm_model_path, "rb") as f:
18
- svm_model = pickle.load(f)
19
- vectorizer_path = hf_hub_download(repo_id=svm_repo_id, filename="vectorizer.pkl")
20
- with open(vectorizer_path, "rb") as f:
21
- vectorizer = pickle.load(f)
22
-
23
- lstm_repo_id = "HighFive-OPJ/Deep_Learning"
24
- lstm_model_path = hf_hub_download(repo_id=lstm_repo_id, filename="LSTM_model.h5")
25
- lstm_model = load_model(lstm_model_path)
26
- lstm_tokenizer_path = hf_hub_download(repo_id=lstm_repo_id, filename="my_tokenizer.pkl")
27
- with open(lstm_tokenizer_path, "rb") as f:
28
- lstm_tokenizer = pickle.load(f)
29
-
30
- bert_tokenizer = AutoTokenizer.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
31
- bert_model = AutoModelForSequenceClassification.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
32
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
33
- bert_model.to(device)
34
-
35
- def preprocess_text(text):
36
- text = text.lower()
37
- text = re.sub(r"[^a-zA-Z\s]", "", text).strip()
38
- return text
39
-
40
- def predict_with_svm(text):
41
- transformed = vectorizer.transform([text])
42
- prediction = svm_model.predict(transformed)
43
- return int(prediction[0])
44
-
45
- def predict_with_lstm(text):
46
- cleaned = preprocess_text(text)
47
- seq = lstm_tokenizer.texts_to_sequences([cleaned])
48
- padded_seq = pad_sequences(seq, maxlen=200)
49
- probs = lstm_model.predict(padded_seq)
50
- predicted_class = np.argmax(probs, axis=1)[0]
51
- return int(predicted_class)
52
-
53
- def predict_with_bert(text):
54
- inputs = bert_tokenizer([text], padding=True, truncation=True, max_length=512, return_tensors="pt").to(device)
55
- with torch.no_grad():
56
- outputs = bert_model(**inputs)
57
- logits = outputs.logits
58
- predictions = logits.argmax(axis=-1).cpu().numpy()
59
- bert_score = int(predictions[0])
60
- if bert_score <= 2:
61
- return 0
62
- elif bert_score == 3:
63
- return 1
64
- else:
65
- return 2
66
-
67
- def analyze_sentiment(text):
68
- results = {
69
- "SVM": predict_with_svm(text),
70
- "LSTM": predict_with_lstm(text),
71
- "BERT": predict_with_bert(text)
72
- }
73
- scores = list(results.values())
74
- average = np.mean(scores)
75
- stats = f"Average Score (0=Neg,1=Neu,2=Pos): {average:.2f}\n"
76
- return (
77
- convert_to_stars(results["SVM"]),
78
- convert_to_stars(results["LSTM"]),
79
- convert_to_stars(results["BERT"]),
80
- stats
81
- )
82
-
83
- def convert_to_stars(score):
84
- # Map 0->1 star, 1->3 stars, 2->5 stars
85
- star_map = {0: 1, 1: 3, 2: 5}
86
- stars = star_map.get(score, 3)
87
- return "★" * stars + "☆" * (5 - stars)
88
-
89
- def process_input(text):
90
- if not text.strip():
91
- return ("", "", "", "Please enter valid text.")
92
- return analyze_sentiment(text)
93
-
94
- with gr.Blocks() as demo:
95
- gr.Markdown("# Sentiment Analysis Demo")
96
- gr.Markdown("""
97
- Enter a review and see how different models evaluate its sentiment! This app uses:
98
- - SVM for classic machine learning
99
- - LSTM for deep learning
100
- - BERT for transformer-based analysis
101
- """)
102
-
103
- with gr.Row():
104
- with gr.Column():
105
- input_text = gr.Textbox(label="Enter your review:", lines=3)
106
- analyze_button = gr.Button("Analyze Sentiment")
107
-
108
- with gr.Column():
109
- svm_output = gr.Textbox(label="SVM", interactive=False)
110
- lstm_output = gr.Textbox(label="LSTM", interactive=False)
111
- bert_output = gr.Textbox(label="BERT", interactive=False)
112
- stats_output = gr.Textbox(label="Statistics", interactive=False)
113
-
114
- analyze_button.click(
115
- process_input,
116
- inputs=[input_text],
117
- outputs=[svm_output, lstm_output, bert_output, stats_output]
118
- )
119
-
120
- demo.launch()