zafirabdullah12 commited on
Commit
4014b56
·
verified ·
1 Parent(s): 1ed3be5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -66
app.py CHANGED
@@ -1,6 +1,7 @@
1
  # Importing Libraries
2
  import os
3
  os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
 
4
 
5
  import re
6
  import nltk
@@ -17,49 +18,45 @@ MAX_LEN = 100
17
  MODEL_PATH = "sentiment_analysis_best.keras"
18
  TOKENIZER_PATH = "tokenizer.pkl"
19
 
20
- nltk.download('stopwords')
21
-
22
- with open("tokenizer.pkl", "rb") as f:
23
- tokenizer = pickle.load(f)
24
 
25
  # Expand common English contractions
26
  def expand_contractions(text):
27
- """Expand common English contractions"""
28
- contractions = {
29
- "i'm": "i am", "you're": "you are", "he's": "he is",
30
- "she's": "she is", "it's": "it is", "we're": "we are",
31
- "they're": "they are", "i've": "i have", "you've": "you have",
32
- "we've": "we have", "they've": "they have", "i'll": "i will",
33
- "you'll": "you will", "he'll": "he will", "she'll": "she will",
34
- "we'll": "we will", "they'll": "they will", "i'd": "i would",
35
- "you'd": "you would", "he'd": "he would", "she'd": "she would",
36
- "we'd": "we would", "they'd": "they would", "don't": "do not",
37
- "doesn't": "does not", "didn't": "did not", "can't": "cannot",
38
- "couldn't": "could not", "won't": "will not", "wouldn't": "would not",
39
- "shouldn't": "should not", "isn't": "is not", "aren't": "are not",
40
- "wasn't": "was not", "weren't": "were not", "hasn't": "has not",
41
- "haven't": "have not", "hadn't": "had not", "mightn't": "might not",
42
- "mustn't": "must not", "needn't": "need not", "shan't": "shall not"
43
- }
44
 
45
- for contraction, expansion in contractions.items():
46
- text = re.sub(r'\b' + contraction + r'\b', expansion, text, flags=re.IGNORECASE)
47
-
48
- return text
49
 
50
  # Preprocessing Function
51
  def preprocess(text):
52
-
53
  # Define words to keep
54
  negations = {"not", "no", "nor", "never", "n't", "nobody", "nothing", "neither", "nowhere", "none"}
55
  important_words = {"am", "is", "are", "was", "were", "be", "been", "being"}
56
 
57
  try:
58
- from nltk.corpus import stopwords
59
- stop_words = set(stopwords.words("english")) - negations - important_words
60
  except:
61
- # Fallback if NLTK not available
62
- stop_words =set()
63
 
64
  # Convert to lowercase
65
  text = text.lower()
@@ -80,25 +77,25 @@ def preprocess(text):
80
 
81
  # Load Train Model and Tokenizer
82
  def load_resources():
83
- try:
84
- # Load model
85
- model = load_model(MODEL_PATH)
86
- print(f"Model loaded successfully from {MODEL_PATH}")
87
 
88
- # Load Tokenizer
89
- with open(TOKENIZER_PATH, "rb") as f:
90
- tokenizer = pickle.load(f)
91
- print(f"Tokenizer loaded successfully from {TOKENIZER_PATH}")
92
 
93
- return model, tokenizer
94
 
95
- except FileNotFoundError as e:
96
- print(f"Error: Model or Tokenizer file not found!")
97
- print(f" Make sure {MODEL_PATH} AND {TOKENIZER_PATH} are in the same directory.")
98
- raise e
99
- except Exception as e:
100
- print(f"Error loading resources: {e}")
101
- raise e
102
 
103
  # Load model and tokenizer globally
104
  model, tokenizer = load_resources()
@@ -132,20 +129,20 @@ def predict_sentiment(text):
132
 
133
  # Create detailed results
134
  detailed_results = f"""
135
- ### πŸ“Š Detailed Analysis:
136
 
137
- **Original Text:** {text}
138
 
139
- **Processed Text:** {processed_text}
140
 
141
- **Prediction Probabilities:**
142
- - 😞 Negative: {pred[0][0] * 100:.2f}%
143
- - 😊 Positive: {pred[0][1] * 100:.2f}%
144
- - 😐 Neutral: {pred[0][2] * 100:.2f}%
145
 
146
- **Final Sentiment:** {sentiment}
147
- **Confidence:** {confidence_percentage}
148
- """
149
 
150
  return sentiment, confidence_percentage, detailed_results
151
 
@@ -178,7 +175,7 @@ def create_gradio_interface():
178
  ]
179
 
180
  # Create interface
181
- with gr.Blocks(title="Sentiment Analysis") as interface:
182
 
183
  # Header
184
  gr.Markdown("""
@@ -262,25 +259,23 @@ def create_gradio_interface():
262
  return interface
263
 
264
 
265
-
266
  # MAIN EXECUTION
267
  if __name__ == "__main__":
268
  print("\n" + "=" * 70)
269
  print("πŸš€ Starting Sentiment Analysis Gradio Interface...")
270
- print("=" * 70)
271
 
272
  # Create and launch interface
273
  interface = create_gradio_interface()
274
 
275
  # Launch with configuration
276
  interface.launch(
277
- server_name="0.0.0.0",
278
- server_port=7860,
279
- theme=gr.themes.Soft(),
280
- ssr_mode=False,
281
- share=False,
282
- show_error=True
283
- )
284
 
285
  print("\n" + "=" * 70)
286
  print("βœ“ Interface is running!")
 
1
  # Importing Libraries
2
  import os
3
  os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
4
+ os.environ['GRADIO_HOT_RELOAD'] = '0' # Disable hot reload to prevent threading errors
5
 
6
  import re
7
  import nltk
 
18
  MODEL_PATH = "sentiment_analysis_best.keras"
19
  TOKENIZER_PATH = "tokenizer.pkl"
20
 
21
+ nltk.download('stopwords', quiet=True)
 
 
 
22
 
23
  # Expand common English contractions
24
# Map of common English contractions to their lowercase expanded forms.
_CONTRACTIONS = {
    "i'm": "i am", "you're": "you are", "he's": "he is",
    "she's": "she is", "it's": "it is", "we're": "we are",
    "they're": "they are", "i've": "i have", "you've": "you have",
    "we've": "we have", "they've": "they have", "i'll": "i will",
    "you'll": "you will", "he'll": "he will", "she'll": "she will",
    "we'll": "we will", "they'll": "they will", "i'd": "i would",
    "you'd": "you would", "he'd": "he would", "she'd": "she would",
    "we'd": "we would", "they'd": "they would", "don't": "do not",
    "doesn't": "does not", "didn't": "did not", "can't": "cannot",
    "couldn't": "could not", "won't": "will not", "wouldn't": "would not",
    "shouldn't": "should not", "isn't": "is not", "aren't": "are not",
    "wasn't": "was not", "weren't": "were not", "hasn't": "has not",
    "haven't": "have not", "hadn't": "had not", "mightn't": "might not",
    "mustn't": "must not", "needn't": "need not", "shan't": "shall not"
}

# One alternation pattern matching any known contraction, compiled once at
# import time instead of rebuilding ~40 patterns on every call. The keys
# contain no regex metacharacters, but re.escape keeps the table safe to
# extend.
_CONTRACTION_RE = re.compile(
    r"\b(" + "|".join(re.escape(c) for c in _CONTRACTIONS) + r")\b",
    flags=re.IGNORECASE,
)


def expand_contractions(text):
    """Expand common English contractions (e.g. "don't" -> "do not").

    Matching is case-insensitive; replacements are always lowercase, which
    matches the previous per-contraction ``re.sub`` behaviour (the caller
    lowercases the text afterwards in ``preprocess`` anyway).

    Args:
        text: Raw input string.

    Returns:
        str: The input with every known contraction replaced by its
        expanded form; other text is untouched.
    """
    return _CONTRACTION_RE.sub(
        lambda m: _CONTRACTIONS[m.group(0).lower()], text
    )
47
 
48
  # Preprocessing Function
49
  def preprocess(text):
 
50
  # Define words to keep
51
  negations = {"not", "no", "nor", "never", "n't", "nobody", "nothing", "neither", "nowhere", "none"}
52
  important_words = {"am", "is", "are", "was", "were", "be", "been", "being"}
53
 
54
  try:
55
+ from nltk.corpus import stopwords
56
+ stop_words = set(stopwords.words("english")) - negations - important_words
57
  except:
58
+ # Fallback if NLTK not available
59
+ stop_words = set()
60
 
61
  # Convert to lowercase
62
  text = text.lower()
 
77
 
78
  # Load Train Model and Tokenizer
79
def load_resources():
    """Load the trained Keras sentiment model and its fitted tokenizer.

    Reads the model from ``MODEL_PATH`` and the pickled tokenizer from
    ``TOKENIZER_PATH`` (both module-level constants), printing a status
    line for each.

    Returns:
        tuple: ``(model, tokenizer)`` ready for inference.

    Raises:
        FileNotFoundError: If either artifact file is missing.
        Exception: Any other loading failure is reported and re-raised.
    """
    try:
        # Load model
        model = load_model(MODEL_PATH)
        print(f"✓ Model loaded successfully from {MODEL_PATH}")

        # Load tokenizer. NOTE(review): pickle.load executes arbitrary code
        # from the file — fine for a bundled artifact, but never load a
        # user-supplied tokenizer this way.
        with open(TOKENIZER_PATH, "rb") as f:
            tokenizer = pickle.load(f)
        print(f"✓ Tokenizer loaded successfully from {TOKENIZER_PATH}")

        return model, tokenizer

    except FileNotFoundError:
        print("✗ Error: Model or Tokenizer file not found!")
        print(f"  Make sure {MODEL_PATH} AND {TOKENIZER_PATH} are in the same directory.")
        raise  # bare raise preserves the original traceback
    except Exception as e:
        print(f"✗ Error loading resources: {e}")
        raise
99
 
100
  # Load model and tokenizer globally
101
  model, tokenizer = load_resources()
 
129
 
130
  # Create detailed results
131
  detailed_results = f"""
132
+ ### πŸ“Š Detailed Analysis:
133
 
134
+ **Original Text:** {text}
135
 
136
+ **Processed Text:** {processed_text}
137
 
138
+ **Prediction Probabilities:**
139
+ - 😞 Negative: {pred[0][0] * 100:.2f}%
140
+ - 😊 Positive: {pred[0][1] * 100:.2f}%
141
+ - 😐 Neutral: {pred[0][2] * 100:.2f}%
142
 
143
+ **Final Sentiment:** {sentiment}
144
+ **Confidence:** {confidence_percentage}
145
+ """
146
 
147
  return sentiment, confidence_percentage, detailed_results
148
 
 
175
  ]
176
 
177
  # Create interface
178
+ with gr.Blocks(title="Sentiment Analysis", theme=gr.themes.Soft()) as interface:
179
 
180
  # Header
181
  gr.Markdown("""
 
259
  return interface
260
 
261
 
 
262
  # MAIN EXECUTION
263
  if __name__ == "__main__":
264
  print("\n" + "=" * 70)
265
  print("πŸš€ Starting Sentiment Analysis Gradio Interface...")
266
+ print("=" * 70 + "\n")
267
 
268
  # Create and launch interface
269
  interface = create_gradio_interface()
270
 
271
  # Launch with configuration
272
  interface.launch(
273
+ server_name="0.0.0.0",
274
+ server_port=7860,
275
+ share=False,
276
+ show_error=True,
277
+ quiet=False
278
+ )
 
279
 
280
  print("\n" + "=" * 70)
281
  print("βœ“ Interface is running!")