miasambolec committed on
Commit
a152575
·
verified ·
1 Parent(s): 3dea9cd

Update demo.py

Browse files
Files changed (1) hide show
  1. demo.py +15 -5
demo.py CHANGED
@@ -5,6 +5,7 @@ from huggingface_hub import hf_hub_download
5
  import gradio as gr
6
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
7
  import torch
 
8
  import pickle
9
  import numpy as np
10
  from tensorflow.keras.models import load_model
@@ -19,7 +20,7 @@ vectorizer_path = hf_hub_download(repo_id=svm_repo_id, filename="vectorizer.pkl"
19
  with open(vectorizer_path, "rb") as f:
20
  vectorizer = pickle.load(f)
21
 
22
- lstm_repo_id = "arjahojnik/LSTM-sentiment-model"
23
  lstm_model_path = hf_hub_download(repo_id=lstm_repo_id, filename="LSTM_model.h5")
24
  lstm_model = load_model(lstm_model_path)
25
  lstm_tokenizer_path = hf_hub_download(repo_id=lstm_repo_id, filename="my_tokenizer.pkl")
@@ -47,7 +48,7 @@ def predict_with_lstm(text):
47
  padded_seq = pad_sequences(seq, maxlen=200)
48
  probs = lstm_model.predict(padded_seq)
49
  predicted_class = np.argmax(probs, axis=1)[0]
50
- return int(predicted_class + 1)
51
 
52
  def predict_with_bert(text):
53
  inputs = bert_tokenizer([text], padding=True, truncation=True, max_length=512, return_tensors="pt").to(device)
@@ -55,7 +56,13 @@ def predict_with_bert(text):
55
  outputs = bert_model(**inputs)
56
  logits = outputs.logits
57
  predictions = logits.argmax(axis=-1).cpu().numpy()
58
- return int(predictions[0] + 1)
 
 
 
 
 
 
59
 
60
  def analyze_sentiment(text):
61
  results = {
@@ -65,7 +72,7 @@ def analyze_sentiment(text):
65
  }
66
  scores = list(results.values())
67
  average = np.mean(scores)
68
- stats = f"Average Score: {average:.2f}\n"
69
  return (
70
  convert_to_stars(results["SVM"]),
71
  convert_to_stars(results["LSTM"]),
@@ -74,7 +81,10 @@ def analyze_sentiment(text):
74
  )
75
 
76
  def convert_to_stars(score):
77
- return "★" * score + "☆" * (5 - score)
 
 
 
78
 
79
  def process_input(text):
80
  if not text.strip():
 
5
  import gradio as gr
6
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
7
  import torch
8
+
9
  import pickle
10
  import numpy as np
11
  from tensorflow.keras.models import load_model
 
20
  with open(vectorizer_path, "rb") as f:
21
  vectorizer = pickle.load(f)
22
 
23
+ lstm_repo_id = "HighFive-OPJ/Deep_Learning"
24
  lstm_model_path = hf_hub_download(repo_id=lstm_repo_id, filename="LSTM_model.h5")
25
  lstm_model = load_model(lstm_model_path)
26
  lstm_tokenizer_path = hf_hub_download(repo_id=lstm_repo_id, filename="my_tokenizer.pkl")
 
48
  padded_seq = pad_sequences(seq, maxlen=200)
49
  probs = lstm_model.predict(padded_seq)
50
  predicted_class = np.argmax(probs, axis=1)[0]
51
+ return int(predicted_class)
52
 
53
  def predict_with_bert(text):
54
  inputs = bert_tokenizer([text], padding=True, truncation=True, max_length=512, return_tensors="pt").to(device)
 
56
  outputs = bert_model(**inputs)
57
  logits = outputs.logits
58
  predictions = logits.argmax(axis=-1).cpu().numpy()
59
+ bert_score = int(predictions[0])
60
+ if bert_score <= 2:
61
+ return 0
62
+ elif bert_score == 3:
63
+ return 1
64
+ else:
65
+ return 2
66
 
67
  def analyze_sentiment(text):
68
  results = {
 
72
  }
73
  scores = list(results.values())
74
  average = np.mean(scores)
75
+ stats = f"Average Score (0=Neg,1=Neu,2=Pos): {average:.2f}\n"
76
  return (
77
  convert_to_stars(results["SVM"]),
78
  convert_to_stars(results["LSTM"]),
 
81
  )
82
 
83
def convert_to_stars(score):
    """Render a 3-class sentiment label as a five-character star string.

    Mapping: 0 (negative) -> 1 filled star, 1 (neutral) -> 3,
    2 (positive) -> 5. Any unexpected value falls back to the
    neutral rendering (3 stars).
    """
    filled = {0: 1, 1: 3, 2: 5}.get(score, 3)
    return "".join(["★"] * filled + ["☆"] * (5 - filled))
88
 
89
  def process_input(text):
90
  if not text.strip():