| import gradio as gr |
| import requests |
| import matplotlib.pyplot as plt |
| import matplotlib |
| matplotlib.use('Agg') |
|
|
| BACKEND_URL = "http://localhost:8000" |
|
|
def classify_intent(text):
    """Classify a customer message via the backend and fetch a suggested reply.

    Args:
        text: Raw customer message typed into the UI.

    Returns:
        A 3-tuple of (intent markdown, top-3 predictions text, generated
        response text). On empty input or any request/parsing failure the
        tuple carries a human-readable message instead of raising.
    """
    if not text.strip():
        return "Please enter a message", "", ""

    try:
        # Classify the message. An explicit timeout keeps the UI from
        # hanging forever if the backend is unreachable or stalled, and
        # raise_for_status turns HTTP errors into a clear exception
        # instead of a KeyError on the missing JSON fields below.
        classify_response = requests.post(
            f"{BACKEND_URL}/classify",
            json={"text": text},
            timeout=30,
        )
        classify_response.raise_for_status()
        result = classify_response.json()

        # Render the top-3 predictions as a numbered plain-text list.
        top3_text = ""
        for i, item in enumerate(result["top3"], 1):
            intent = item["intent"].replace("_", " ").title()
            conf = item["confidence"]
            top3_text += f"{i}. {intent} β {conf}%\n"

        # Low-confidence fallback: surface the backend's canned message
        # instead of asking for a generated reply.
        if result.get("fallback"):
            return (
                f"β οΈ **Unknown Query** ({result['confidence']}%)",
                top3_text,
                result["fallback_message"]
            )

        top_intent = result["top_intent"].replace("_", " ").title()
        confidence = result["confidence"]

        # Ask the backend for a suggested customer-service response.
        # Generation can be slow, so allow a more generous timeout here.
        respond_response = requests.post(
            f"{BACKEND_URL}/respond",
            json={
                "text": text,
                "intent": result["top_intent"]
            },
            timeout=120,
        )
        respond_response.raise_for_status()
        generated = respond_response.json()["response"]

        return (
            f"**{top_intent}** ({confidence}%)",
            top3_text,
            generated
        )

    except Exception as e:
        # Broad catch is deliberate: this is the UI boundary, and any
        # failure should surface as a message rather than a traceback.
        return f"Error: {str(e)}", "", ""
|
|
def get_analytics():
    """Fetch usage analytics from the backend and render them for the UI.

    Returns:
        A 3-tuple of (matplotlib figure, recent-queries text, summary
        markdown). On any failure the figure carries the error message and
        the summary reads "Error fetching analytics".
    """
    try:
        # Explicit timeout so a dashboard refresh cannot hang the UI if
        # the backend is down; raise_for_status turns HTTP errors into a
        # clear exception handled below.
        response = requests.get(f"{BACKEND_URL}/analytics", timeout=30)
        response.raise_for_status()
        data = response.json()

        total = data["total_queries"]
        unique = data["unique_intents_seen"]
        top_intents = data["top_intents"]
        recent = data["recent_queries"]

        # Horizontal bar chart of the most frequent intents; the lists
        # are reversed so the most frequent intent sits at the top.
        if top_intents:
            labels = [x["intent"].replace("_", " ").title() for x in top_intents]
            counts = [x["count"] for x in top_intents]

            fig, ax = plt.subplots(figsize=(10, 5))
            bars = ax.barh(labels[::-1], counts[::-1], color="#4F86C6")
            ax.set_xlabel("Number of Queries")
            ax.set_title("Top Intents by Frequency")

            # Annotate each bar with its raw count just past the bar end.
            for bar, count in zip(bars, counts[::-1]):
                ax.text(
                    bar.get_width() + 0.1,
                    bar.get_y() + bar.get_height()/2,
                    str(count),
                    va='center'
                )

            plt.tight_layout()
        else:
            fig, ax = plt.subplots()
            ax.text(0.5, 0.5, "No queries yet", ha='center', va='center')

        # Drop the figure from pyplot's global registry so repeated
        # refreshes don't accumulate figures (Agg warns after 20). The
        # Figure object itself stays alive and renderable for Gradio.
        plt.close(fig)

        # Plain-text table of the most recent queries, newest first.
        recent_text = ""
        if recent:
            recent_text = "Time | Intent | Confidence\n"
            recent_text += "-" * 50 + "\n"
            for q in reversed(recent):
                intent = q["intent"].replace("_", " ").title()
                recent_text += f"{q['timestamp']} | {intent} | {q['confidence']}%\n"
        else:
            recent_text = "No queries yet"

        summary = f"Total Queries: {total} | Unique Intents Seen: {unique}"

        return fig, recent_text, summary

    except Exception as e:
        # UI boundary: render the error inside the plot area rather than
        # letting the exception escape into Gradio.
        fig, ax = plt.subplots()
        ax.text(0.5, 0.5, f"Error: {str(e)}", ha='center', va='center')
        plt.close(fig)
        return fig, "", "Error fetching analytics"
|
|
def compare_models(text):
    """Run the same message through both models via the backend /compare API.

    Args:
        text: Raw customer message typed into the UI.

    Returns:
        A 4-tuple of (zero-shot intent markdown, zero-shot top-3 text,
        fine-tuned intent markdown, fine-tuned top-3 text). On empty input
        or any request failure, human-readable messages are returned.
    """
    if not text.strip():
        return "Please enter a message", "", "Please enter a message", ""

    try:
        # Two model inferences happen server-side, so allow a generous
        # timeout — but never hang indefinitely. raise_for_status turns
        # HTTP errors into a clear exception instead of a KeyError below.
        response = requests.post(
            f"{BACKEND_URL}/compare",
            json={"text": text},
            timeout=120,
        )
        response.raise_for_status()
        data = response.json()

        # Zero-shot (base model) results.
        zs = data["zero_shot"]
        zs_intent = zs["top_intent"].replace("_", " ").title()
        zs_conf = zs["confidence"]
        zs_top3 = ""
        for i, item in enumerate(zs["top3"], 1):
            intent = item["intent"].replace("_", " ").title()
            zs_top3 += f"{i}. {intent} β {item['confidence']}%\n"

        # Fine-tuned (LoRA) results.
        ft = data["fine_tuned"]
        ft_intent = ft["top_intent"].replace("_", " ").title()
        ft_conf = ft["confidence"]
        ft_top3 = ""
        for i, item in enumerate(ft["top3"], 1):
            intent = item["intent"].replace("_", " ").title()
            ft_top3 += f"{i}. {intent} β {item['confidence']}%\n"

        return (
            f"**{zs_intent}** ({zs_conf}%)",
            zs_top3,
            f"**{ft_intent}** ({ft_conf}%)",
            ft_top3
        )

    except Exception as e:
        # UI boundary: surface failures as text, never a traceback.
        return f"Error: {str(e)}", "", "", ""
|
|
# --- UI layout -------------------------------------------------------------
# Three tabs: single-message classification, an analytics dashboard, and a
# side-by-side zero-shot vs fine-tuned comparison. All callbacks are the
# functions defined above, which call the backend at BACKEND_URL.
with gr.Blocks(title="Banking Intent Classifier") as demo:
    gr.Markdown("# π¦ Banking Intent Classifier")
    gr.Markdown("Powered by fine-tuned Qwen2.5 + LoRA | Trained on BANKING77")

    with gr.Tabs():
        # Tab 1: classify one message and show a suggested reply.
        with gr.Tab("π Classify"):
            with gr.Row():
                with gr.Column():
                    text_input = gr.Textbox(
                        label="Customer Message",
                        placeholder="e.g. my card hasn't arrived yet...",
                        lines=3
                    )
                    submit_btn = gr.Button("Classify", variant="primary")

                with gr.Column():
                    intent_output = gr.Markdown(label="Detected Intent")
                    top3_output = gr.Textbox(
                        label="Top 3 Predictions",
                        lines=4,
                        interactive=False
                    )

            # Full-width box below the row for the generated reply.
            response_output = gr.Textbox(
                label="π¬ Suggested Customer Service Response",
                lines=4,
                interactive=False
            )

            # Canned examples; the last two ("hi", "what is life") are
            # out-of-domain and exercise the fallback path.
            gr.Examples(
                examples=[
                    ["My card hasn't arrived yet"],
                    ["I can't remember my PIN"],
                    ["My transfer failed"],
                    ["I think my card was stolen"],
                    ["Why is my balance wrong?"],
                    ["hi"],
                    ["what is life"],
                ],
                inputs=text_input
            )

            submit_btn.click(
                fn=classify_intent,
                inputs=text_input,
                outputs=[intent_output, top3_output, response_output]
            )

        # Tab 2: analytics dashboard, refreshed on demand (not live).
        with gr.Tab("π Analytics"):
            refresh_btn = gr.Button("Refresh Dashboard", variant="primary")
            summary_output = gr.Markdown()

            with gr.Row():
                chart_output = gr.Plot(label="Intent Frequency")
                recent_output = gr.Textbox(
                    label="Recent Queries",
                    lines=12,
                    interactive=False
                )

            refresh_btn.click(
                fn=get_analytics,
                outputs=[chart_output, recent_output, summary_output]
            )

        # Tab 3: same query sent to both the base and the LoRA model.
        with gr.Tab("βοΈ Model Comparison"):
            gr.Markdown("""
            ### Zero-shot vs Fine-tuned
            See the impact of fine-tuning in real time.
            Same query, same base model β the only difference is LoRA fine-tuning on BANKING77.
            """)

            compare_input = gr.Textbox(
                label="Customer Message",
                placeholder="e.g. my card hasn't arrived yet...",
                lines=2
            )
            compare_btn = gr.Button("Compare Models", variant="primary")

            with gr.Row():
                with gr.Column():
                    gr.Markdown("### β Zero-shot (no fine-tuning)")
                    zero_intent = gr.Markdown()
                    zero_top3 = gr.Textbox(
                        label="Top 3 Predictions",
                        lines=4,
                        interactive=False
                    )

                with gr.Column():
                    # NOTE(review): this heading's string literal was split
                    # across two lines by a mangled emoji in the extracted
                    # source; rejoined here on one line — confirm the
                    # original emoji/text against the repo.
                    gr.Markdown("### β Fine-tuned (LoRA on BANKING77)")
                    ft_intent = gr.Markdown()
                    ft_top3 = gr.Textbox(
                        label="Top 3 Predictions",
                        lines=4,
                        interactive=False
                    )

            gr.Examples(
                examples=[
                    ["My card hasn't arrived yet"],
                    ["I forgot my PIN"],
                    ["My transfer failed"],
                    ["hi"],
                ],
                inputs=compare_input
            )

            compare_btn.click(
                fn=compare_models,
                inputs=compare_input,
                outputs=[zero_intent, zero_top3, ft_intent, ft_top3]
            )
|
|
if __name__ == "__main__":

    # Bind to all interfaces so the UI is reachable from outside a
    # container or remote host; 7860 is Gradio's conventional port.
    demo.launch(server_name="0.0.0.0", server_port=7860)