zafirabdullah12 commited on
Commit
9624bb7
·
verified ·
1 Parent(s): bde5aa7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -69
app.py CHANGED
@@ -3,18 +3,22 @@ import os
3
  os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # Force TensorFlow to use CPU only
4
  os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
5
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Suppress all TensorFlow messages
6
- os.environ['GRADIO_HOT_RELOAD'] = 'false' # Disable Gradio hot reload to avoid the error # You have no Nvidia GPU and Cuda
7
  os.environ['WRAPT_DISABLE_EXTENSIONS'] = 'true'
8
 
9
  import re
10
  import nltk
11
  import pickle
12
  import string
 
13
  import numpy as np
14
  import gradio as gr
15
  from keras.models import load_model
16
  from keras.preprocessing.sequence import pad_sequences
17
 
 
 
 
18
  # Constants
19
  MAX_LEN = 100
20
  MODEL_PATH = "sentiment_analysis_best.keras"
@@ -24,50 +28,21 @@ nltk.download('stopwords', quiet=True)
24
 
25
  # Expand common English contractions
26
  def expand_contractions(text):
27
- """Expand common English contractions"""
28
  contractions = {
29
- "i'm": "i am",
30
- "you're": "you are",
31
- "he's": "he is",
32
- "she's": "she is",
33
- "it's": "it is",
34
- "we're": "we are",
35
- "they're": "they are",
36
- "i've": "i have",
37
- "you've": "you have",
38
- "we've": "we have",
39
- "they've": "they have",
40
- "i'll": "i will",
41
- "you'll": "you will",
42
- "he'll": "he will",
43
- "she'll": "she will",
44
- "we'll": "we will",
45
- "they'll": "they will",
46
- "i'd": "i would",
47
- "you'd": "you would",
48
- "he'd": "he would",
49
- "she'd": "she would",
50
- "we'd": "we would",
51
- "they'd": "they would",
52
- "don't": "do not",
53
- "doesn't": "does not",
54
- "didn't": "did not",
55
- "can't": "cannot",
56
- "couldn't": "could not",
57
- "won't": "will not",
58
- "wouldn't": "would not",
59
- "shouldn't": "should not",
60
- "isn't": "is not",
61
- "aren't": "are not",
62
- "wasn't": "was not",
63
- "weren't": "were not",
64
- "hasn't": "has not",
65
- "haven't": "have not",
66
- "hadn't": "had not",
67
- "mightn't": "might not",
68
- "mustn't": "must not",
69
- "needn't": "need not",
70
- "shan't": "shall not"
71
  }
72
  for contraction, expansion in contractions.items():
73
  text = re.sub(r'\b' + contraction + r'\b', expansion, text, flags=re.IGNORECASE)
@@ -118,7 +93,7 @@ def load_resources():
118
  return model, tokenizer
119
  except FileNotFoundError as e:
120
  print(f"✗ Error: Model or Tokenizer file not found!")
121
- print(f" Make sure {MODEL_PATH} AND {TOKENIZER_PATH} are in the same directory.")
122
  raise e
123
  except Exception as e:
124
  print(f"✗ Error loading resources: {e}")
@@ -157,12 +132,16 @@ def predict_sentiment(text):
157
  # Create detailed results
158
  detailed_results = f"""
159
  ### 📊 Detailed Analysis:
 
160
  **Original Text:** {text}
 
161
  **Processed Text:** {processed_text}
 
162
  **Prediction Probabilities:**
163
  - 😞 Negative: {pred[0][0] * 100:.2f}%
164
  - 😊 Positive: {pred[0][1] * 100:.2f}%
165
  - 😐 Neutral: {pred[0][2] * 100:.2f}%
 
166
  **Final Sentiment:** {sentiment}
167
  **Confidence:** {confidence_percentage}
168
  """
@@ -195,7 +174,7 @@ def create_gradio_interface():
195
  ["I'm okay"]
196
  ]
197
 
198
- # Create interface - theme removed to avoid Gradio 6.0 warning
199
  with gr.Blocks(title="Sentiment Analysis") as interface:
200
  # Header
201
  gr.Markdown("""
@@ -288,30 +267,18 @@ if __name__ == "__main__":
288
  interface = create_gradio_interface()
289
 
290
  # Launch with configuration
291
- try:
292
- interface.launch(
293
- server_name="0.0.0.0",
294
- server_port=7860,
295
- share=False,
296
- show_error=True,
297
- ssr_mode=False,
298
- debug=True,
299
- theme=gr.themes.Soft()
300
- )
301
- except Exception as e:
302
- print("Server shutdown cleanup warning ignored:", e)
303
- # interface.launch(
304
- # server_name="0.0.0.0",
305
- # server_port=7860,
306
- # share=False,
307
- # show_error=True,
308
- # ssr_mode=False,
309
- # debug=True,
310
- # theme=gr.themes.Soft()
311
- # )
312
 
313
  print("\n" + "=" * 70)
314
  print("✓ Interface is running!")
315
- print(" Local URL: http://localhost:7860")
316
- print(" Press Ctrl+C to stop the server")
317
  print("=" * 70)
 
3
  os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # Force TensorFlow to use CPU only
4
  os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
5
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Suppress all TensorFlow messages
6
+ os.environ['GRADIO_HOT_RELOAD'] = 'false' # Disable Gradio hot reload
7
  os.environ['WRAPT_DISABLE_EXTENSIONS'] = 'true'
8
 
9
  import re
10
  import nltk
11
  import pickle
12
  import string
13
+ import warnings
14
  import numpy as np
15
  import gradio as gr
16
  from keras.models import load_model
17
  from keras.preprocessing.sequence import pad_sequences
18
 
19
+ # Suppress warnings
20
+ warnings.filterwarnings('ignore')
21
+
22
  # Constants
23
  MAX_LEN = 100
24
  MODEL_PATH = "sentiment_analysis_best.keras"
 
28
 
29
  # Expand common English contractions
30
  def expand_contractions(text):
 
31
  contractions = {
32
+ "i'm": "i am", "you're": "you are", "he's": "he is",
33
+ "she's": "she is", "it's": "it is", "we're": "we are",
34
+ "they're": "they are", "i've": "i have", "you've": "you have",
35
+ "we've": "we have", "they've": "they have", "i'll": "i will",
36
+ "you'll": "you will", "he'll": "he will", "she'll": "she will",
37
+ "we'll": "we will", "they'll": "they will", "i'd": "i would",
38
+ "you'd": "you would", "he'd": "he would", "she'd": "she would",
39
+ "we'd": "we would", "they'd": "they would", "don't": "do not",
40
+ "doesn't": "does not", "didn't": "did not", "can't": "cannot",
41
+ "couldn't": "could not", "won't": "will not", "wouldn't": "would not",
42
+ "shouldn't": "should not", "isn't": "is not", "aren't": "are not",
43
+ "wasn't": "was not", "weren't": "were not", "hasn't": "has not",
44
+ "haven't": "have not", "hadn't": "had not", "mightn't": "might not",
45
+ "mustn't": "must not", "needn't": "need not", "shan't": "shall not"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
  }
47
  for contraction, expansion in contractions.items():
48
  text = re.sub(r'\b' + contraction + r'\b', expansion, text, flags=re.IGNORECASE)
 
93
  return model, tokenizer
94
  except FileNotFoundError as e:
95
  print(f"✗ Error: Model or Tokenizer file not found!")
96
+ print(f" Make sure {MODEL_PATH} AND {TOKENIZER_PATH} are in the same directory.")
97
  raise e
98
  except Exception as e:
99
  print(f"✗ Error loading resources: {e}")
 
132
  # Create detailed results
133
  detailed_results = f"""
134
  ### 📊 Detailed Analysis:
135
+
136
  **Original Text:** {text}
137
+
138
  **Processed Text:** {processed_text}
139
+
140
  **Prediction Probabilities:**
141
  - 😞 Negative: {pred[0][0] * 100:.2f}%
142
  - 😊 Positive: {pred[0][1] * 100:.2f}%
143
  - 😐 Neutral: {pred[0][2] * 100:.2f}%
144
+
145
  **Final Sentiment:** {sentiment}
146
  **Confidence:** {confidence_percentage}
147
  """
 
174
  ["I'm okay"]
175
  ]
176
 
177
+ # Create interface
178
  with gr.Blocks(title="Sentiment Analysis") as interface:
179
  # Header
180
  gr.Markdown("""
 
267
  interface = create_gradio_interface()
268
 
269
  # Launch with configuration
270
+ interface.launch(
271
+ server_name="0.0.0.0",
272
+ server_port=7860,
273
+ share=False,
274
+ show_error=True,
275
+ ssr_mode=False,
276
+ theme=gr.themes.Soft(),
277
+ quiet=True # Suppress server messages
278
+ )
 
 
 
 
 
 
 
 
 
 
 
 
279
 
280
  print("\n" + "=" * 70)
281
  print("✓ Interface is running!")
282
+ print(" Local URL: http://localhost:7860")
283
+ print(" Press Ctrl+C to stop the server")
284
  print("=" * 70)