koushikvkr484 committed
Commit bcc174f · verified · 1 Parent(s): 51b92e6

Update app.py

Files changed (1):
  1. app.py +92 -137

app.py CHANGED
@@ -8,27 +8,24 @@ import streamlit as st
  import tensorflow as tf
  from nltk.corpus import stopwords
  from nltk.tokenize import word_tokenize
  from tensorflow.keras.preprocessing.sequence import pad_sequences
- # Note: Tokenizer import is not strictly needed here since it's loaded from file,
- # but it was in your original code, so it is kept for completeness.
- # from tensorflow.keras.preprocessing.text import Tokenizer
-
- ## -----------------------------
- ## 📦 Setup and Configuration
- ## -----------------------------
-
- # Use TensorFlow's legacy loader for compatibility
- load_model = tf.keras.models.load_model  # IMPORTANT for older Streamlit/TensorFlow versions
-
- # English stop words list
- stop_english = set(stopwords.words('english'))
-
- # Custom NLTK directory setup
  NLTK_DIR = os.path.join(os.getcwd(), "nltk_data")
  os.makedirs(NLTK_DIR, exist_ok=True)
  nltk.data.path.append(NLTK_DIR)

- # Download NLTK resources if missing
  try:
      nltk.data.find('tokenizers/punkt')
  except LookupError:
@@ -39,150 +36,108 @@ try:
  except LookupError:
      nltk.download('stopwords', download_dir=NLTK_DIR)

- # Configuration must be set before Streamlit components are defined
- st.set_page_config(
-     page_title="Ticket Classification App",
-     layout="centered",
-     initial_sidebar_state="auto"
- )
-
- ## -----------------------------
- ## ⚙️ Load Model, Tokenizer, and Encoders
- ## -----------------------------
-
- @st.cache_resource
- def load_assets():
-     """Loads all necessary machine learning assets."""
-     try:
-         # Load Model
-         model_path = "model.h5"
-         model = load_model(model_path, compile=False)
-
-         # Load Encoders
-         with open("le_type.pkl", "rb") as f:
-             le_type = pickle.load(f)
-         with open("le_queue.pkl", "rb") as f:
-             le_queue = pickle.load(f)
-         with open("mlb.pkl", "rb") as f:
-             mlb = pickle.load(f)
-
-         # Load Tokenizer
-         with open("tokenizer.pkl", "rb") as f:
-             tokenizer = pickle.load(f)
-
-         return model, le_type, le_queue, mlb, tokenizer
-
-     except FileNotFoundError as e:
-         st.error(f"Missing required file: {e.filename}. Please ensure 'model.h5', 'tokenizer.pkl', 'le_type.pkl', 'le_queue.pkl', and 'mlb.pkl' are in the same directory.")
-         st.stop()
-     except Exception as e:
-         st.error(f"An error occurred during asset loading: {e}")
-         st.stop()
-
- model, le_type, le_queue, mlb, tokenizer = load_assets()
- MAX_SEQ_LEN = 107  # MUST match training parameter

- ## -----------------------------
- ## 🧼 Text Cleaning and Preparation Functions
- ## -----------------------------

  def clean_text(t):
-     """Performs text cleaning including lowercasing, stop word removal, and regex cleaning."""
-     if pd.isna(t) or t is None:
          return ""
-
-     t = str(t).lower()
-
-     # Tokenization and Stop Word Removal
      tokens = word_tokenize(t)
      tokens = [w for w in tokens if w not in stop_english and len(w) > 2]
      t = " ".join(tokens)
-
-     # Regex cleaning
-     t = re.sub(r"<.*?>", " ", t)  # Remove HTML tags
-     t = re.sub(r"\\n", " ", t)  # Remove literal \n
-     t = re.sub(r"http\S+|www\.\S+", " ", t)  # Remove URLs
-     t = re.sub(r"\S+@\S+", " ", t)  # Remove emails
-     # Remove various punctuation and special characters
-     t = re.sub(r"[%\[\]_\\<\(\]#\?\'\":\)\-\;\+\!\/,>\.\n\r]", " ", t)
-     t = re.sub(r"\s+", " ", t).strip()  # Collapse multiple spaces and trim
-
      return t

  def convert_to_sequence(txt):
-     """Converts cleaned text to a padded sequence."""
-     seq = tokenizer.texts_to_sequences([txt])  # Tokenizer expects a list
      padded = pad_sequences(seq, maxlen=MAX_SEQ_LEN, padding="pre", truncating="pre")
      return padded
 
118
- ## -----------------------------
119
- ## 🖥️ Streamlit UI
120
- ## -----------------------------
121
 
122
- st.title("🎫 Ticket Classification App")
123
- st.markdown("Enter the subject and body of a support ticket to predict its **Type**, **Queue**, and **Tags**.")
124
 
125
- # Example text display (for context/help)
126
- st.subheader("Example Input")
127
- st.code("""Subject: Account Disruption
128
- Body: Dear Customer Support Team, I am writing to report a significant problem with the centralized account management portal...""")
129
- st.markdown("---")
130
 
 
 
131
 
132
- col1, col2 = st.columns(2)
133
 
134
- with col1:
135
- subject = st.text_input("Enter the **Subject**:", key="subject_input")
136
- with col2:
137
- body = st.text_area("Enter the **Body**:", key="body_input", height=100)
138
 
139
- ## -----------------------------
140
- ## 🚀 Prediction Logic
141
- ## -----------------------------
142
 
143
- if st.button("Submit for Classification"):
144
-
145
- if not subject and not body:
146
- st.warning("Please enter some text in the Subject or Body fields to submit.")
147
- else:
148
- with st.spinner('Classifying ticket...'):
149
- # Combine and Clean Text
150
- raw_text = subject + " " + body
151
- cleaned = clean_text(raw_text)
152
-
153
- if not cleaned:
154
- st.warning("The input text was empty or contained only stop words/punctuation after cleaning.")
155
- else:
156
- # Convert to Sequence
157
- seq = convert_to_sequence(cleaned)
158
-
159
- # Make Prediction
160
- preds = model.predict(seq, verbose=0)
161
- pred_type_probs, pred_queue_probs, pred_tags_probs = preds
162
-
163
- # Decode single-label outputs
164
- pred_type = le_type.inverse_transform([np.argmax(pred_type_probs)])
165
- pred_queue = le_queue.inverse_transform([np.argmax(pred_queue_probs)])
166
-
167
- # Decode multi-label outputs
168
- pred_tags_binary = (pred_tags_probs >= 0.5).astype(int)
169
- pred_tags = mlb.inverse_transform(pred_tags_binary)
170
-
171
- # --- Display Results ---
172
- st.success("Classification Complete!")
173
-
174
- st.markdown("### Predicted Categories")
175
- st.write(f"**Type:** `{pred_type[0]}`")
176
- st.write(f"**Queue:** `{pred_queue[0]}`")
177
-
178
- if pred_tags and pred_tags[0]:
179
- st.write(f"**Tags:** `{'`, `'.join(pred_tags[0])}`")
180
- else:
181
- st.write("**Tags:** *No tags predicted (below threshold)*")
182
-
183
- st.markdown("---")
184
- st.markdown("### Preprocessing Details")
185
- st.write("**Cleaned Text:**", cleaned)
186
- # Optional: Show a preview of the probability scores for debugging
187
- # st.write("Type Confidence:", np.max(pred_type_probs))
188
- # st.write("Queue Confidence:", np.max(pred_queue_probs))
 
  import tensorflow as tf
  from nltk.corpus import stopwords
  from nltk.tokenize import word_tokenize
+ from tensorflow.keras.preprocessing.text import Tokenizer
  from tensorflow.keras.preprocessing.sequence import pad_sequences

+ # -----------------------------
+ # Use TensorFlow's legacy loader
+ # -----------------------------
+ load_model = tf.keras.models.load_model  # IMPORTANT

+ # -----------------------------
+ # NLTK Requirements
+ # -----------------------------
+ # Custom NLTK directory
  NLTK_DIR = os.path.join(os.getcwd(), "nltk_data")
  os.makedirs(NLTK_DIR, exist_ok=True)
  nltk.data.path.append(NLTK_DIR)

  try:
      nltk.data.find('tokenizers/punkt')
  except LookupError:
  except LookupError:
      nltk.download('stopwords', download_dir=NLTK_DIR)

+ # Load stopwords NOW
+ stop_english = set(stopwords.words("english"))
+
+ # -----------------------------
+ # Example text
+ # -----------------------------
+ st.write("Account Disruption")
+ st.write("""Dear Customer Support Team,
+ I am writing to report a significant problem with the centralized account management portal...
+ """)
+
+ # -----------------------------
+ # Streamlit UI
+ # -----------------------------
+ st.title("Ticket Classification App")
+
+ col1, col2 = st.columns(2)
+ with col1:
+     subject = st.text_input("Enter your subject:")
+ with col2:
+     body = st.text_input("Enter your body:")
+
+ # -----------------------------
+ # Load Model
+ # -----------------------------
+ model_path = "model.h5"
+ model = load_model(model_path, compile=False)  # <- works on HF
+
+ with open("le_type.pkl", "rb") as f:
+     le_type = pickle.load(f)
+
+ with open("le_queue.pkl", "rb") as f:
+     le_queue = pickle.load(f)
+
+ with open("mlb.pkl", "rb") as f:
+     mlb = pickle.load(f)

+ # -----------------------------
+ # Load Tokenizer
+ # -----------------------------
+ with open("tokenizer.pkl", "rb") as f:
+     tokenizer = pickle.load(f)

+ MAX_SEQ_LEN = 107  # MUST match training

+ # -----------------------------
+ # Clean Text
+ # -----------------------------
  def clean_text(t):
+     if pd.isna(t):
          return ""
+
+     t = t.lower()
      tokens = word_tokenize(t)
      tokens = [w for w in tokens if w not in stop_english and len(w) > 2]
      t = " ".join(tokens)
+
+     # regex cleaning
+     t = re.sub(r"<.*?>", " ", t)
+     t = re.sub(r"\\n", " ", t)
+     t = re.sub(r"http\S+|www\.\S+", " ", t)
+     t = re.sub(r"\S+@\S+", " ", t)
+     t = re.sub(r"[%\[\]_\\<\(\]#\?\'\":\)\-\;\+\!\/,>\.\n\r]", " ", t)
+     t = re.sub(r"\s+", " ", t).strip()
+
      return t

+ # -----------------------------
+ # Convert Text → Sequence
+ # -----------------------------
  def convert_to_sequence(txt):
+     seq = tokenizer.texts_to_sequences([txt])  # must be list
      padded = pad_sequences(seq, maxlen=MAX_SEQ_LEN, padding="pre", truncating="pre")
      return padded

+ # -----------------------------
+ # Prediction
+ # -----------------------------
+ if st.button("Submit"):
+     raw_text = subject + " " + body
+
+     cleaned = clean_text(raw_text)
+     st.write("Cleaned Text:", cleaned)
+
+     seq = convert_to_sequence(cleaned)
+
+     preds = model.predict(seq)
+
+     pred_type_probs, pred_queue_probs, pred_tags_probs = preds
+
+     # Decode single-label outputs
+     pred_type = le_type.inverse_transform([np.argmax(pred_type_probs)])
+     pred_queue = le_queue.inverse_transform([np.argmax(pred_queue_probs)])
+
+     # Decode multi-label outputs
+     pred_tags_binary = (pred_tags_probs >= 0.5).astype(int)
+     pred_tags = mlb.inverse_transform(pred_tags_binary)
+
+     st.write("Predicted Type:", pred_type[0])
+     st.write("Predicted Queue:", pred_queue[0])
+     st.write("Predicted Tags:", pred_tags)