Eklavya73 committed on
Commit
3fbebe2
Β·
verified Β·
1 Parent(s): 42b486e

Upload 13 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Datasets/Domain-A_Dataset_Clean.csv filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ __pycache__/
2
+ *.pyc
3
+ .gradio/
4
+ *.log
5
+ /tmp/
Datasets/Domain-A_Dataset_Clean.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a75145c90f0d6dad33433e7fa996dae9941da0cda2065b2015b327e368a19c91
3
+ size 20014738
Models/db_embeddings.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e22107cb0072e10e333853ec7e3c4fc376b53334b39288d74d7e2fe150646cc
3
+ size 135659648
Models/department_prototypes.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc14a6630d89e49213096a44ed0a5b80141849c32e84f6a797f7ca00e869bf51
3
+ size 31682
Models/hf_scaler.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78585451f4491cf6d972debc92abb9115a1e36cf656045f1749d08d828011371
3
+ size 759
Models/mlb_tag_binarizer.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae4f9a6a39c6cd339154d5761b7d4d2d9cf9b2e9be0148bb4a95495a6e1a8057
3
+ size 1774
Models/priority_encoder.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5cb01a7e21469b628fc277b2a60c89367615ef9199a15a270019f98eb8413f7f
3
+ size 567
Models/sbert_classifier.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fcc0bcf076a4ef374c2e507c3680d349ae62933b3e00cead9fb2fdb9e294c00a
3
+ size 22859821
Models/tag_calibrators.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7d35216a0cda5ac83785f722581d63c085dc28e570dc957a025c54a16456c28f
3
+ size 16329
Models/tuned_priority_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ffcb930933a60b6f7269efd5fa0d71a99567083cb986694f558bf4e37d2856d
3
+ size 10923685
README.md CHANGED
@@ -1,13 +1,36 @@
1
  ---
2
  title: Intelligent Ticket Auto-Routing System
3
- emoji: 🐨
4
- colorFrom: gray
5
- colorTo: yellow
6
  sdk: gradio
7
- sdk_version: 6.10.0
8
  app_file: app.py
9
  pinned: false
10
- short_description: 'A Domain-Adaptive Multi-Label and Duplicate- Aware Routing '
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  title: Intelligent Ticket Auto-Routing System
3
+ emoji: 🎫
4
+ colorFrom: indigo
5
+ colorTo: purple
6
  sdk: gradio
7
+ sdk_version: 5.23.0
8
  app_file: app.py
9
  pinned: false
10
+ license: mit
11
  ---
12
 
13
+ # 🎫 Intelligent Ticket Auto-Routing System
14
+
15
+ An AI-powered support ticket routing system that automatically:
16
+
17
+ - **Classifies** tickets with multi-label tags
18
+ - **Routes** them to the correct department
19
+ - **Predicts** priority level
20
+ - **Detects** duplicate tickets using FAISS semantic search
21
+
22
+ ## How It Works
23
+
24
+ 1. Enter a support ticket description
25
+ 2. The system encodes it using Sentence-BERT (`all-mpnet-base-v2`)
26
+ 3. A calibrated classifier predicts relevant tags
27
+ 4. Department routing uses a hybrid of tag-voting + semantic similarity to department prototypes
28
+ 5. Priority is predicted using text features + embeddings
29
+ 6. FAISS index checks for duplicate tickets in the database
30
+
31
+ ## Tech Stack
32
+
33
+ - **Sentence-BERT** for semantic embeddings
34
+ - **FAISS** for fast similarity search
35
+ - **Scikit-learn** classifiers with isotonic calibration
36
+ - **Gradio** for the interactive UI
app.py ADDED
@@ -0,0 +1,492 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Intelligent Ticket Auto-Routing System β€” Hugging Face Spaces App
3
+ ================================================================
4
+ Converts support tickets into structured routing decisions:
5
+ β€’ Multi-label tag classification
6
+ β€’ Department routing (hybrid: tag-voting + semantic similarity)
7
+ β€’ Priority prediction
8
+ β€’ Duplicate detection via FAISS
9
+ """
10
+
11
import csv
import os
import tempfile
import time
import uuid
from datetime import datetime
from pathlib import Path

import faiss
import gradio as gr
import joblib
import numpy as np
import pandas as pd
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
25
+
26
# ── Paths ────────────────────────────────────────────────────────────────────
APP_DIR = Path(__file__).resolve().parent
MODEL_DIR = APP_DIR / "Models"
DATA_DIR = APP_DIR / "Datasets"
# Log under the system temp dir: on HF Spaces only the temp dir is reliably
# writable.  (`import tempfile` moved to the top-of-file import block.)
LOG_PATH = os.path.join(tempfile.gettempdir(), "routing_evaluation_log.csv")
32
+
33
# ── Load Models ──────────────────────────────────────────────────────────────
print("Loading SBERT model...")
sbert = SentenceTransformer("all-mpnet-base-v2")

print("Loading classifiers...")
tag_model = joblib.load(MODEL_DIR / "sbert_classifier.pkl")
tag_calibrators = joblib.load(MODEL_DIR / "tag_calibrators.pkl")

# The tuned priority model may be saved bare or wrapped in a {"model": ...} dict.
priority_bundle = joblib.load(MODEL_DIR / "tuned_priority_model.pkl")
if isinstance(priority_bundle, dict) and "model" in priority_bundle:
    priority_model = priority_bundle["model"]
else:
    priority_model = priority_bundle
priority_encoder = joblib.load(MODEL_DIR / "priority_encoder.pkl")
hf_scaler = joblib.load(MODEL_DIR / "hf_scaler.pkl")

tag_binarizer = joblib.load(MODEL_DIR / "mlb_tag_binarizer.pkl")
tag_list = list(tag_binarizer.classes_)

dept_prototypes = joblib.load(MODEL_DIR / "department_prototypes.pkl")

print(f"[OK] Tags: {len(tag_list)}, Departments: {len(dept_prototypes)}")

# ── Load Dataset & Build FAISS Index ─────────────────────────────────────────
print("Loading dataset and embeddings...")
df = pd.read_csv(DATA_DIR / "Domain-A_Dataset_Clean.csv")
embeddings = np.load(MODEL_DIR / "db_embeddings.npy").astype("float32")

# Inner-product index over L2-normalised vectors == cosine similarity.
index = faiss.IndexFlatIP(embeddings.shape[1])
faiss.normalize_L2(embeddings)  # normalises the matrix in place
index.add(embeddings)

print(f"[OK] FAISS index: {index.ntotal} vectors")

# ── Duplicate Detection ──────────────────────────────────────────────────────
# Cosine-similarity cutoff, presumably tuned offline — TODO confirm provenance.
DUP_THRESHOLD = 0.7623
# Row i of this list corresponds to vector i in the FAISS index.
submitted_texts = df["text"].astype(str).tolist()
71
+
72
+
73
def check_duplicate(query_emb):
    """Return (is_duplicate, matched_text, score) for the nearest indexed ticket.

    The query embedding is L2-normalised so the inner-product search on the
    IndexFlatIP index yields cosine similarity in [-1, 1].
    """
    query = query_emb.astype("float32").reshape(1, -1).copy()
    faiss.normalize_L2(query)

    # Only the single best hit is ever inspected, so ask for k=1
    # (the original requested 20 neighbours and discarded 19 of them).
    scores, ids = index.search(query, 1)
    best_idx = int(ids[0][0])
    best_score = float(scores[0][0])

    if best_score >= DUP_THRESHOLD:
        # Defensive: index ids should always map into submitted_texts,
        # since register_ticket appends to both in lock-step.
        matched = (
            submitted_texts[best_idx]
            if best_idx < len(submitted_texts)
            else "(unknown)"
        )
        return True, matched, best_score

    return False, None, best_score
91
+
92
+
93
def register_ticket(query_emb, text):
    """Append a newly submitted ticket to the in-memory FAISS index."""
    vec = query_emb.astype("float32").reshape(1, -1).copy()
    faiss.normalize_L2(vec)  # must match the normalisation used at search time
    index.add(vec)
    submitted_texts.append(text)  # keep the text list aligned with index ids
99
+
100
+
101
+ # ── Tag Prediction ───────────────────────────────────────────────────────────
102
# ── Tag Prediction ───────────────────────────────────────────────────────────
def predict_tags(text, emb):
    """Predict calibrated tag probabilities for one ticket embedding.

    Returns (top5_indices, top5_probabilities, all_calibrated_probs),
    with the top-5 ordered by descending calibrated probability.
    NOTE: `text` is unused here; classification runs on the embedding only.
    """
    raw = np.asarray(tag_model.predict_proba([emb])[0], dtype=float)
    probs = raw.copy()

    # Apply the per-tag isotonic calibrators where one was fitted.
    for pos, calibrator in enumerate(tag_calibrators):
        if calibrator is None:
            continue
        probs[pos] = float(
            calibrator.predict(np.asarray([raw[pos]], dtype=float))[0]
        )

    top = np.argsort(probs)[::-1][:5]
    return top, probs[top], probs
115
+
116
+
117
+ # ── Priority Prediction ─────────────────────────────────────────────────────
118
# ── Priority Prediction ─────────────────────────────────────────────────────
def extract_features(text):
    """Hand-crafted lexical features used alongside the SBERT embedding.

    Returns [char_count, word_count, unique_word_ratio, avg_word_length,
    urgency_keyword_hits, negation_keyword_hits].
    NOTE(review): keyword hits use *substring* containment on the lowered
    text (e.g. "download" matches "down") — kept as-is because the priority
    model was trained on these exact features.
    """
    lowered = text.lower()
    tokens = text.split()
    avg_word_len = np.mean([len(t) for t in tokens]) if tokens else 0
    urgency_hits = sum(kw in lowered for kw in ("urgent", "critical", "down"))
    negation_hits = sum(kw in lowered for kw in ("not", "cannot", "no"))
    return [
        len(text),
        len(tokens),
        len(set(tokens)) / (len(tokens) + 1),  # +1 avoids div-by-zero on ""
        avg_word_len,
        urgency_hits,
        negation_hits,
    ]
128
+
129
+
130
def predict_priority(text, emb):
    """Predict a priority label from scaled lexical features + the embedding."""
    scaled = hf_scaler.transform([extract_features(text)])
    combined = np.hstack([emb.reshape(1, -1), scaled])
    label_idx = int(priority_model.predict(combined)[0])
    return str(priority_encoder.classes_[label_idx])
136
+
137
+
138
+ # ── Routing Engine ───────────────────────────────────────────────────────────
139
# ── Routing Engine ───────────────────────────────────────────────────────────
def route_ticket(emb, text):
    """Decide routing mode, department and priority for one ticket.

    Blends the mean of the top-5 tag probabilities ("vote") with cosine
    similarity to the closest department prototype into a hybrid score,
    then walks a confidence cascade from full auto-routing down to
    mandatory human review.

    Returns (mode, department, priority, hybrid_score, needs_review).
    """
    _, top_probs, all_probs = predict_tags(text, emb)
    vote_score = np.mean(top_probs)

    # Nearest department prototype by cosine similarity.
    best_dept = None
    best_sim = -1
    for dept_name, prototype in dept_prototypes.items():
        similarity = cosine_similarity([emb], [prototype])[0][0]
        if similarity > best_sim:
            best_dept, best_sim = dept_name, similarity

    hybrid = 0.7 * vote_score + 0.3 * best_sim
    # Adaptive threshold: mean + std of all calibrated tag probabilities,
    # clamped to [0.45, 0.70].
    threshold = np.clip(np.mean(all_probs) + np.std(all_probs), 0.45, 0.70)

    # Confidence cascade, most to least confident.
    if hybrid >= threshold:
        mode, review = "AUTO_ROUTE", False
    elif vote_score >= 0.40 and hybrid >= 0.40:
        mode, review = "AUTO_ROUTE_VOTE", False
    elif best_sim >= 0.65:
        mode, review = "AUTO_ROUTE_SEMANTIC", False
    elif hybrid >= 0.30:
        mode, review = "AUTO_ROUTE_LOW_CONF", True
    else:
        mode, review = "HUMAN_REVIEW", True

    return mode, best_dept, predict_priority(text, emb), hybrid, review
168
+
169
+
170
# ── Logging ──────────────────────────────────────────────────────────────────
# Column order of the CSV evaluation log; missing keys are written as "".
LOG_COLUMNS = [
    "ticket_id",
    "timestamp",
    "ticket_text",
    "duplicate_flag",
    "duplicate_score",
    "routing_mode",
    "department",
    "department_confidence",
    "priority",
    "priority_confidence",
    "selected_tags",
    "routing_score",
    "prediction_latency_ms",
    "explanation",
]
177
+
178
+
179
def _ensure_log_header():
    """Create the log file with a header row if it does not exist yet."""
    if os.path.exists(LOG_PATH):
        return
    with open(LOG_PATH, "w", newline="", encoding="utf-8") as fh:
        csv.writer(fh).writerow(LOG_COLUMNS)
183
+
184
+
185
def _append_log(row_dict):
    """Append one routing decision to the CSV log (missing fields become "")."""
    _ensure_log_header()
    row = [row_dict.get(col, "") for col in LOG_COLUMNS]
    with open(LOG_PATH, "a", newline="", encoding="utf-8") as fh:
        csv.writer(fh).writerow(row)
189
+
190
+
191
+ # ── Main Processing Pipeline ────────────────────────────────────────────────
192
# ── Main Processing Pipeline ────────────────────────────────────────────────
def process_ticket(text):
    """Run the full pipeline for one ticket and return a result dict.

    Steps: embed -> duplicate check -> route -> register -> log.  Every
    ticket (duplicate or not) is added to the FAISS index and appended
    to the CSV evaluation log.
    """
    started = time.time()
    ticket_id = str(uuid.uuid4())[:8]
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    emb = sbert.encode(text)

    # Duplicate detection against everything indexed so far.
    is_dup, dup_text, dup_score = check_duplicate(emb)

    # Routing decision (computed even for duplicates, for the log/UI).
    mode, dept, priority, conf, review = route_ticket(emb, text)

    latency_ms = round((time.time() - started) * 1000, 2)

    # Human-readable summary of the top-3 predicted tags.
    tag_idx, top_probs, _ = predict_tags(text, emb)
    tag_summary = ", ".join(
        f"{tag_list[idx]} ({top_probs[j]:.2f})"
        for j, idx in enumerate(tag_idx[:3])
    )

    if is_dup:
        routing_mode = "DUPLICATE_CHAIN"
        explanation = (
            f"Duplicate detected (score={dup_score:.4f}). "
            f"Original: {str(dup_text)[:100]}"
        )
        result = {
            "ticket_id": ticket_id,
            "status": "⚠️ DUPLICATE",
            "route": "DUPLICATE_CHAIN",
            "department": dept,
            "priority": priority,
            "confidence": round(float(dup_score), 3),
            "review": False,
            "tags": tag_summary,
            "message": f"Duplicate of: {str(dup_text)[:200]}",
            "latency": latency_ms,
        }
    else:
        routing_mode = mode
        explanation = (
            f"Ticket routed to {dept} because predicted tags "
            f"[{tag_summary}] map to the {dept} department. "
            f"Routing mode: {mode}, Score: {conf:.3f}"
        )
        result = {
            "ticket_id": ticket_id,
            "status": "✅ NOT DUPLICATE",
            "route": mode,
            "department": dept,
            "priority": priority,
            "confidence": round(float(conf), 3),
            "review": review,
            "tags": tag_summary,
            "message": "Ticket processed successfully",
            "latency": latency_ms,
        }

    # Register the ticket (duplicates included) and persist the decision.
    register_ticket(emb, text)
    _append_log({
        "ticket_id": ticket_id,
        "timestamp": timestamp,
        "ticket_text": text,
        "duplicate_flag": is_dup,
        "duplicate_score": round(float(dup_score), 4),
        "routing_mode": routing_mode,
        "department": dept,
        "department_confidence": round(float(conf), 4),
        "priority": priority,
        "priority_confidence": "",  # not produced by the current model
        "selected_tags": tag_summary,
        "routing_score": round(float(conf), 4),
        "prediction_latency_ms": latency_ms,
        "explanation": explanation,
    })

    return result
272
+
273
+
274
+ # ── Gradio UI Handler ──────────────────────��─────────────────────────────────
275
# ── Gradio UI Handler ────────────────────────────────────────────────────────
def ui_process(text):
    """Gradio handler: validate input, run the pipeline, format 9 outputs."""
    if not text or not text.strip():
        return (
            "⚠️ Please enter ticket text",
            "", "", "", "", "", "", "", ""
        )

    result = process_ticket(text.strip())

    # Confidence rendered as a percentage string.
    confidence_display = f"{int(result['confidence'] * 100)}%"

    # Review badge.
    review_badge = (
        "🔴 Yes — Manual review recommended" if result["review"] else "🟢 No"
    )

    # Priority with emoji.
    priority_labels = {
        "critical": "🔴 Critical",
        "high": "🟠 High",
        "medium": "🟡 Medium",
        "low": "🟢 Low",
    }
    priority_display = priority_labels.get(
        result["priority"].lower(), result["priority"]
    )

    # Route mode with emoji.
    route_labels = {
        "AUTO_ROUTE": "⚡ Auto-Routed",
        "AUTO_ROUTE_VOTE": "⚡ Auto-Routed (Tag Vote)",
        "AUTO_ROUTE_SEMANTIC": "⚡ Auto-Routed (Semantic)",
        "AUTO_ROUTE_LOW_CONF": "⚠️ Auto-Routed (Low Confidence)",
        "HUMAN_REVIEW": "🧑‍💼 Human Review Required",
        "DUPLICATE_CHAIN": "🔗 Duplicate Chain",
    }
    route_display = route_labels.get(result["route"], result["route"])

    # Department display (underscores -> spaces).
    dept_display = result["department"].replace("_", " ")

    return (
        result["status"],
        f"🎫 {result['ticket_id']}",
        route_display,
        f"🏢 {dept_display}",
        priority_display,
        confidence_display,
        result["tags"],
        review_badge,
        result["message"],
    )
326
+
327
+
328
# ── Custom CSS ───────────────────────────────────────────────────────────────
# Injected via gr.Blocks(css=...): Inter webfont, gradient header banner,
# result cards, styled submit/clear buttons, stats footer; hides Gradio's
# default footer.
CSS = """
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap');

* { font-family: 'Inter', sans-serif !important; }

.gradio-container {
    max-width: 960px !important;
    margin: 0 auto !important;
}

/* Header */
.app-header {
    text-align: center;
    padding: 1.5rem 1rem;
    background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 50%, #a855f7 100%);
    border-radius: 16px;
    margin-bottom: 1.5rem;
    box-shadow: 0 8px 32px rgba(79, 70, 229, 0.3);
}
.app-header h1 {
    color: white !important;
    font-size: 1.75rem !important;
    font-weight: 700 !important;
    margin: 0 !important;
    letter-spacing: -0.02em;
}
.app-header p {
    color: rgba(255,255,255,0.85) !important;
    font-size: 0.95rem !important;
    margin: 0.4rem 0 0 0 !important;
}

/* Cards */
.result-card {
    background: linear-gradient(145deg, rgba(255,255,255,0.05), rgba(255,255,255,0.02));
    border: 1px solid rgba(255,255,255,0.1);
    border-radius: 12px;
    padding: 0.25rem;
}

/* Status indicators */
.status-box textarea, .status-box input {
    font-weight: 600 !important;
    font-size: 1rem !important;
}

/* Submit button */
.submit-btn {
    background: linear-gradient(135deg, #4f46e5, #7c3aed) !important;
    border: none !important;
    color: white !important;
    font-weight: 600 !important;
    font-size: 1rem !important;
    padding: 0.75rem 2rem !important;
    border-radius: 10px !important;
    box-shadow: 0 4px 16px rgba(79, 70, 229, 0.4) !important;
    transition: all 0.3s ease !important;
}
.submit-btn:hover {
    transform: translateY(-2px) !important;
    box-shadow: 0 6px 24px rgba(79, 70, 229, 0.5) !important;
}

/* Clear button */
.clear-btn {
    border: 1px solid rgba(255,255,255,0.2) !important;
    border-radius: 10px !important;
    font-weight: 500 !important;
}

/* Stats footer */
.stats-row {
    text-align: center;
    padding: 0.75rem;
    background: rgba(79, 70, 229, 0.08);
    border-radius: 10px;
    margin-top: 0.5rem;
    font-size: 0.85rem;
    color: #a5b4fc;
}

footer { display: none !important; }
"""
412
+
413
# ── Example Tickets ──────────────────────────────────────────────────────────
# Each example is a one-element list because gr.Examples is wired to a single
# input component (ticket_input).
EXAMPLES = [
    ["My laptop screen is flickering and sometimes goes completely black. I've tried restarting but the issue persists after login."],
    ["I cannot access the company VPN from my home network. It keeps showing authentication failed error even though my password is correct."],
    ["We need to upgrade our database server as the current one is running out of storage space and response times have increased significantly."],
    ["I was charged twice for my last month's subscription. Please process a refund for the duplicate charge."],
    ["The email server has been down since this morning. No one in the office can send or receive emails. This is critical!"],
    ["Can you provide training materials for the new CRM software that was deployed last week?"],
]
422
+
423
# ── Build UI ─────────────────────────────────────────────────────────────────
with gr.Blocks(css=CSS, theme=gr.themes.Soft(primary_hue="indigo", neutral_hue="slate"), title="Ticket Auto-Routing System") as app:

    # Header banner (styled by .app-header in CSS above).
    gr.HTML("""
        <div class="app-header">
            <h1>🎫 Intelligent Ticket Auto-Routing System</h1>
            <p>AI-powered ticket classification, routing, priority prediction & duplicate detection</p>
        </div>
    """)

    with gr.Row():
        # ── Left: Input ──
        with gr.Column(scale=1):
            ticket_input = gr.Textbox(
                label="📝 Ticket Description",
                placeholder="Describe the support issue in detail...",
                lines=6,
                max_lines=12,
            )
            with gr.Row():
                submit_btn = gr.Button("🚀 Process Ticket", variant="primary", elem_classes=["submit-btn"])
                clear_btn = gr.ClearButton(value="🗑️ Clear", elem_classes=["clear-btn"])

            gr.Examples(
                examples=EXAMPLES,
                inputs=ticket_input,
                label="💡 Try these examples",
            )

        # ── Right: Results ──
        with gr.Column(scale=1):
            with gr.Group(elem_classes=["result-card"]):
                dup_status = gr.Textbox(label="🔍 Duplicate Status", interactive=False, elem_classes=["status-box"])
                ticket_id = gr.Textbox(label="🆔 Ticket ID", interactive=False)

            with gr.Group(elem_classes=["result-card"]):
                with gr.Row():
                    route_mode = gr.Textbox(label="🛤️ Routing Mode", interactive=False)
                    department = gr.Textbox(label="🏢 Department", interactive=False)
                with gr.Row():
                    priority = gr.Textbox(label="⚡ Priority", interactive=False)
                    confidence = gr.Textbox(label="📊 Confidence", interactive=False)

            with gr.Group(elem_classes=["result-card"]):
                tags = gr.Textbox(label="🏷️ Predicted Tags", interactive=False)
                needs_review = gr.Textbox(label="👀 Needs Review", interactive=False)
                message = gr.Textbox(label="💬 Details", interactive=False, lines=2)

    # Static stats footer — values are computed once at startup, not refreshed.
    gr.HTML(f"""
    <div class="stats-row">
        📊 Database: <strong>{index.ntotal:,}</strong> tickets indexed
        &nbsp;•&nbsp;
        🏷️ <strong>{len(tag_list)}</strong> tag categories
        &nbsp;•&nbsp;
        🏢 <strong>{len(dept_prototypes)}</strong> departments
    </div>
    """)

    # ── Wire events ──
    # Same handler serves both the button click and textbox Enter;
    # the ClearButton resets the input and every output field.
    outputs = [dup_status, ticket_id, route_mode, department, priority, confidence, tags, needs_review, message]

    submit_btn.click(fn=ui_process, inputs=ticket_input, outputs=outputs)
    ticket_input.submit(fn=ui_process, inputs=ticket_input, outputs=outputs)
    clear_btn.add([ticket_input] + outputs)
488
+
489
+
490
# ── Launch ───────────────────────────────────────────────────────────────────
if __name__ == "__main__":
    # Default launch; Hugging Face Spaces handles host/port binding.
    app.launch()
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ gradio>=5.0.0
2
+ sentence-transformers>=2.2.0
3
+ faiss-cpu>=1.7.0
4
+ scikit-learn==1.5.1
5
+ numpy>=1.24.0
6
+ pandas>=2.0.0
7
+ joblib>=1.3.0
8
+ xgboost>=2.0.0
9
+ lightgbm>=4.0.0