Abhishek Singh commited on
Commit
dcafbca
·
1 Parent(s): 8f7dbb0

add app.py, update README.md and create requirements.txt files

Browse files
Files changed (3) hide show
  1. README.md +7 -7
  2. app.py +237 -0
  3. requirements.txt +3 -0
README.md CHANGED
@@ -1,14 +1,14 @@
1
  ---
2
- title: MagicSupport Customer Intent Classifier
3
- emoji: 🐢
4
- colorFrom: pink
5
- colorTo: yellow
6
  sdk: gradio
7
- sdk_version: 6.9.0
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
11
- short_description: This space hosts the production-grade intent classification.
12
  ---
13
 
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
1
  ---
2
+ title: MagicSupport Intent Classifier
3
+ emoji: 🎧
4
+ colorFrom: violet
5
+ colorTo: slate
6
  sdk: gradio
7
+ sdk_version: 4.20.0
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
 
11
  ---
12
 
13
+ # MagicSupport Intent Classifier
14
+ This space hosts the production-grade intent classification pipeline for the `learn-abc/magicSupport-intent-classifier` model, optimized for automated customer support routing.
app.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import logging
4
+ import torch.nn.functional as F
5
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
6
+
7
+ # Configure professional logging
8
+ logging.basicConfig(format='%(asctime)s | %(levelname)s | %(message)s', level=logging.INFO)
9
+ logger = logging.getLogger(__name__)
10
+
11
class MagicSupportClassifier:
    """
    Encapsulates the customer support intent classification model.

    Engineered for dynamic label resolution and rapid inference: the class
    count and label names are read from the checkpoint config at load time,
    so a retrained model with a different taxonomy needs no code changes.
    """

    def __init__(self, model_id: str = "learn-abc/magicSupport-intent-classifier"):
        # Hub repo id of the fine-tuned sequence-classification checkpoint.
        self.model_id = model_id
        # Token budget per query; customer queries are short, 128 is ample.
        self.max_length = 128
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self._load_model()

    def _load_model(self):
        """Load tokenizer and model from the Hub, move to device, set eval mode.

        Raises:
            Exception: re-raised (after logging) if the checkpoint cannot load,
            so the Space fails fast instead of serving a broken backend.
        """
        logger.info("Initializing model %s on %s...", self.model_id, self.device)
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
            self.model = AutoModelForSequenceClassification.from_pretrained(self.model_id)
            self.model.to(self.device)
            self.model.eval()

            # Extract number of classes dynamically from the checkpoint config.
            self.num_classes = len(self.model.config.id2label)
            logger.info("Model loaded successfully with %d intent classes.", self.num_classes)
        except Exception:
            logger.exception("Failed to load model")
            raise

    def _get_iconography(self, label: str) -> str:
        """
        Dynamically assigns UI icons based on intent keywords.
        Future-proofs the application against retrained label sets.
        The first matching keyword family wins, so ordering matters.
        """
        label_lower = label.lower()
        if "order" in label_lower or "delivery" in label_lower or "track" in label_lower:
            return "📦"
        if "refund" in label_lower or "payment" in label_lower or "invoice" in label_lower or "fee" in label_lower:
            return "💳"
        if "account" in label_lower or "password" in label_lower or "register" in label_lower or "profile" in label_lower:
            return "👤"
        if "cancel" in label_lower or "delete" in label_lower or "problem" in label_lower or "issue" in label_lower:
            return "⚠️"
        if "contact" in label_lower or "service" in label_lower or "support" in label_lower:
            return "🎧"
        return "🔹"

    def _format_label(self, label: str) -> str:
        """Cleans up raw snake_case dataset labels for professional UI presentation."""
        return label.replace("_", " ").title()

    @torch.inference_mode()
    def predict(self, text: str, top_k: int = 5):
        """
        Classify a customer query and render ranked predictions.

        Args:
            text: Raw customer message; surrounding whitespace is ignored.
            top_k: Number of ranked predictions to display. Coerced to int
                (Gradio sliders can deliver floats) and capped at the
                model's class count.

        Returns:
            Tuple of (result_html, chart_data). chart_data maps formatted
            label -> probability for the gr.Label chart; it is None on
            empty input or inference failure.
        """
        if not text or not text.strip():
            return "<div style='color: #ef4444; padding: 10px;'>⚠️ <b>Input Required:</b> Please enter a customer query.</div>", None

        try:
            inputs = self.tokenizer(
                text.strip(),
                return_tensors="pt",
                truncation=True,
                max_length=self.max_length,
                padding=True
            ).to(self.device)

            logits = self.model(**inputs).logits
            probs = F.softmax(logits, dim=-1).squeeze()

            if probs.dim() == 0:
                # Degenerate one-class model: keep probs 1-D so topk works.
                probs = probs.unsqueeze(0)

            # Cap top_k to the available classes; int() guards against the
            # Gradio slider handing us a float (torch.topk requires int k).
            actual_top_k = min(int(top_k), self.num_classes)

            # Single topk call (was computed twice: once for indices, once
            # for values).
            topk = torch.topk(probs, k=actual_top_k)
            top_indices = topk.indices.tolist()
            top_probs = topk.values.tolist()
            id2label = self.model.config.id2label

            # Primary Prediction Formatting
            top_intent_raw = id2label[top_indices[0]]
            emoji = self._get_iconography(top_intent_raw)
            clean_label = self._format_label(top_intent_raw)
            confidence = top_probs[0] * 100

            result_html = f"""
            <h2 style='margin-bottom: 5px; display: flex; align-items: center; gap: 8px;'>{emoji} {clean_label}</h2>
            <p style='margin-top: 0; font-size: 16px;'><b>Confidence:</b> {confidence:.1f}%</p>
            <hr style='border-top: 1px solid var(--border-color-primary); margin: 20px 0;'/>
            <h3 style='margin-bottom: 15px;'>📊 Top {actual_top_k} Predictions</h3>
            """

            # HTML progress bars, one per ranked prediction.
            for idx, prob in zip(top_indices, top_probs):
                intent_raw = id2label[idx]
                e = self._get_iconography(intent_raw)
                l = self._format_label(intent_raw)
                pct = prob * 100

                bar_html = f"""
                <div style="margin-bottom: 16px;">
                  <div style="display: flex; justify-content: space-between; margin-bottom: 4px;">
                    <strong>{e} {l}</strong>
                    <span style="font-family:monospace;">{pct:.1f}%</span>
                  </div>
                  <div style="background-color: var(--background-fill-secondary); border: 1px solid var(--border-color-primary); border-radius: 6px; width: 100%; height: 10px;">
                    <div style="background-color: #8b5cf6; width: {pct}%; height: 100%; border-radius: 5px;"></div>
                  </div>
                </div>
                """
                result_html += bar_html

            # Full distribution for the chart — one tolist() call instead of
            # a per-element .item() round-trip through the tensor.
            chart_data = {
                self._format_label(id2label[i]): float(p)
                for i, p in enumerate(probs.tolist())
            }

            return result_html, chart_data

        except Exception:
            logger.exception("Inference error")
            return "<div style='color: #ef4444;'>❌ <b>System Error:</b> Inference failed. Check application logs.</div>", None
130
+
131
# Initialize application backend (downloads the checkpoint on first start;
# raises — and fails the Space — if the model cannot be loaded).
app_backend = MagicSupportClassifier()

# High-value test scenarios based on the Bitext customer-support taxonomy.
# Each entry is [query_text, top_k], matching the (text_input, top_k_slider)
# inputs wired to gr.Examples below.
EXAMPLES = [
    ["I need to cancel my order immediately, it was placed by mistake.", 5],
    ["Where can I find the invoice for my last purchase?", 3],
    ["The item arrived damaged and I want a full refund.", 5],
    ["How do I change the shipping address on my account?", 3],
    ["I forgot my password and cannot log in.", 3],
    ["Are there any hidden fees if I cancel my subscription now?", 5],
]
143
+
144
# Build Gradio Interface
with gr.Blocks(
    theme=gr.themes.Soft(primary_hue="violet", secondary_hue="slate"),
    title="MagicSupport Intent Classifier R&D Dashboard",
    css="""
    .header-box { text-align: center; padding: 25px; background: var(--background-fill-secondary); border-radius: 10px; border: 1px solid var(--border-color-primary); margin-bottom: 20px;}
    .header-box h1 { color: var(--body-text-color); margin-bottom: 5px; }
    .header-box p { color: var(--body-text-color-subdued); font-size: 16px; margin-top: 0; }
    .badge { display: inline-block; padding: 4px 12px; border-radius: 12px; font-size: 13px; font-weight: 600; margin: 4px; }
    .domain-badge { background: #ede9fe; color: #5b21b6; border: 1px solid #ddd6fe;}
    .metric-badge { background: #f1f5f9; color: #334155; border: 1px solid #cbd5e1;}
    footer { display: none !important; }
    """
) as demo:

    # Header banner with domain badges.
    gr.HTML("""
    <div class="header-box">
        <h1>🎧 MagicSupport Intent Classifier</h1>
        <p>
            High-precision semantic routing for automated customer support pipelines.
        </p>
        <div style="margin-top:12px;">
            <span class="badge domain-badge">E-commerce & Retail</span>
            <span class="badge domain-badge">Account Management</span>
            <span class="badge domain-badge">Billing & Refunds</span>
            <span class="badge metric-badge">Based on Bitext Taxonomy</span>
        </div>
    </div>
    """)

    with gr.Row():
        # Left column: query input and controls.
        with gr.Column(scale=5):
            text_input = gr.Textbox(
                label="Input Customer Query",
                placeholder="Type a customer message here (e.g., 'Where is my package?')...",
                lines=3,
            )

            with gr.Row():
                top_k_slider = gr.Slider(
                    minimum=1, maximum=15, value=5, step=1,
                    label="Display Top-K Predictions"
                )

            with gr.Row():
                predict_btn = gr.Button("🔍 Execute Prediction", variant="primary")
                clear_btn = gr.Button("🗑️ Clear Interface", variant="secondary")

            gr.Examples(
                examples=EXAMPLES,
                inputs=[text_input, top_k_slider],
                label="Actionable Test Scenarios",
                examples_per_page=6,
            )

        # Right column: rendered predictions and the full distribution chart.
        with gr.Column(scale=5):
            result_output = gr.HTML(label="Inference Results")

            with gr.Row():
                chart_output = gr.Label(
                    label="Full Semantic Distribution Map",
                    num_top_classes=app_backend.num_classes  # Dynamically set based on model config
                )

    with gr.Accordion("⚙️ Technical Architecture & Model Details", open=False):
        gr.Markdown("""
        ### Core Specifications
        * **Target Model:** `learn-abc/magicSupport-intent-classifier`
        * **Objective:** Multi-class text sequence classification for customer support routing.
        * **Dataset Lineage:** Trained on the comprehensive `bitext/Bitext-customer-support-llm-chatbot-training-dataset`.

        ### Pipeline Features
        * **Dynamic Label Resolution:** The UI heuristic engine automatically maps raw dataset labels (e.g., `change_shipping_address`) into clean, professional UI elements (e.g., Change Shipping Address) and assigns contextual iconography.
        * **Optimized Inference:** Utilizes PyTorch `inference_mode` for reduced memory footprint and accelerated compute during forward passes.
        """)

    # Event Wiring: both the button click and Enter in the textbox run predict.
    predict_btn.click(
        fn=app_backend.predict,
        inputs=[text_input, top_k_slider],
        outputs=[result_output, chart_output],
    )
    text_input.submit(
        fn=app_backend.predict,
        inputs=[text_input, top_k_slider],
        outputs=[result_output, chart_output],
    )
    # Reset all four components to their initial states (slider back to 5).
    clear_btn.click(
        fn=lambda: ("", 5, "", None),
        outputs=[text_input, top_k_slider, result_output, chart_output],
    )

if __name__ == "__main__":
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio
2
+ torch
3
+ transformers