import os
from typing import Dict, Literal
from langgraph.graph import StateGraph, START
from langgraph.types import Command
from dotenv import load_dotenv
import gradio as gr
from langchain_huggingface import HuggingFaceEndpoint
# Load environment variables from a local .env file (if present) so the
# Hugging Face token can be supplied outside of source control.
load_dotenv()
# May be None when the variable is unset — the endpoint setup below depends on it.
HF_TOKEN = os.getenv("HF_TOKEN")
# Define HuggingFaceEndpoint — the single LLM shared by all agents below.
if not HF_TOKEN:
    # Fail fast with an actionable message instead of the opaque
    # AttributeError that HF_TOKEN.strip() raises when the env var is unset.
    raise RuntimeError("HF_TOKEN environment variable is not set; add it to your .env")
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    huggingfacehub_api_token=HF_TOKEN.strip(),
    temperature=0.7,
    max_new_tokens=150,
)
# Define state class for managing the ticket state
class State(dict):
    """Shared state flowing through the LangGraph nodes.

    NOTE(review): these are class-level annotations on a dict subclass —
    LangGraph reads them as the state schema; actual values live in the dict.
    """
    issue_description: str   # raw ticket text entered by the user
    priority: str            # one of 'urgent' | 'critical' | 'normal'
    escalation_needed: bool  # True when priority is urgent/critical
    response: str            # LLM-generated reply for the customer
    escalation: str          # human-readable escalation instruction
# Create the graph builder; nodes and edges are registered further below.
builder = StateGraph(State)
# Define the nodes for the multi-agent flow:
# ticket creation -> priority classification -> escalation check -> response.
def ticket_creation_agent(state: State) -> Command[Literal["priority_classification_agent"]]:
    """Register the incoming ticket description and hand off to priority classification."""
    description = state["issue_description"]
    return Command(
        update={"issue_description": description},
        goto="priority_classification_agent",
    )
# Priority classification agent
def priority_classification_agent(state: State) -> Command[Literal["escalation_classification_agent"]]:
    """Classify the ticket's priority with the LLM, then route to the escalation check.

    The model is asked for exactly one of 'urgent', 'critical' or 'normal'.
    Instruct models frequently wrap the label in a sentence, so the reply is
    scanned for the first known label rather than requiring an exact match;
    an unrecognised reply falls back to 'normal'.
    """
    prompt = (
        f"You are a support assistant. Based on the severity, classify the issues "
        f"into one of the following categories:\n"
        f"1. Urgent: Issues causing significant business impact (e.g., payment gateway failure, server down or issues that can not wait as per company's well being).\n"
        f"2. Critical: Issues needing immediate attention but with manageable impact.\n"
        f"3. Normal: Issues that can wait for routine handling.\n\n"
        f"Issue: {state['issue_description']} \n"
        f"Clearly specify only one category: 'urgent', 'critical', or 'normal'."
    )
    reply = llm.invoke(prompt).strip().lower()
    print("priority : ", reply)
    # Pick the first recognised label inside the reply; default to 'normal'
    # when the model answered with something else entirely.
    priority = next(
        (label for label in ("urgent", "critical", "normal") if label in reply),
        "normal",
    )
    return Command(update={"priority": priority}, goto="escalation_classification_agent")
# Generate response agent
def generate_response_agent(state: State) -> Dict[str, str]:
    """Produce the customer-facing reply and the escalation instruction."""
    if state["escalation_needed"]:
        escalation = "Escalate the issue to a senior team member immediately."
    else:
        escalation = "No escalation needed."
    # The prompt spells out the expected shape of the answer so the model
    # returns something directly usable in the UI.
    prompt = (
        f"You are a customer service assistant. Generate a complete, concise and actionable response "
        f"for the following issue:\n\n"
        f"Issue: {state['issue_description']}\n"
        f"Priority: {state['priority']}.\n\n"
        f"Your response should directly address the issue and provide next steps."
    )
    reply = llm.invoke(prompt).strip()
    return {"response": reply, "escalation": escalation}
# Escalation classification agent
def escalation_classification_agent(state: State) -> Command[Literal["generate_response_agent"]]:
    """Flag urgent/critical tickets for escalation, then route to response generation."""
    needs_escalation = state["priority"].lower() in ("urgent", "critical")
    print(f"Escalation Needed: {needs_escalation}, Priority: {state['priority']}")  # Debugging escalation
    return Command(update={"escalation_needed": needs_escalation}, goto="generate_response_agent")
# Gradio Interface function to process the ticket
# NOTE(review): this definition is dead code — it is shadowed by the second
# `process_ticket` defined after the graph is compiled, which is the one
# Gradio actually calls. Consider deleting it.
def process_ticket(issue_description: str):
    """Process the issue ticket through the multi-agent flow.

    Returns a (response, escalation) pair, or an error-message pair on failure.
    """
    state = {"issue_description": issue_description}
    try:
        print(f"Initial Issue Description: {issue_description}") # Debug log
        result = graph.invoke(state)
        print(f"Graph Result: {result}") # Debug log
        response = result.get("response", "No response generated")
        escalation = result.get("escalation", "No escalation specified")
        return response, escalation
    except Exception as e:
        print(f"Error occurred: {e}") # Debug log
        return f"Error occurred: {e}", "Unable to determine escalation"
# Wire the agents into the graph: register each node, then the entry edge
# from START into ticket creation.
builder.add_node("ticket_creation_agent", ticket_creation_agent)
builder.add_node("priority_classification_agent", priority_classification_agent)
builder.add_node("escalation_classification_agent", escalation_classification_agent)
builder.add_node("generate_response_agent", generate_response_agent)
builder.add_edge(START, "ticket_creation_agent")
# Compile the graph into a runnable object
graph = builder.compile()
# Gradio Interface function to process the ticket
def process_ticket(issue_description: str):
    """Run one ticket through the compiled multi-agent graph.

    Parameters
    ----------
    issue_description : str
        Free-text description of the customer's issue.

    Returns
    -------
    tuple[str, str, str, str, str]
        (response, escalation decision, markdown heading, link text,
        mermaid code block) — one value per Gradio output component.
    """
    state = {"issue_description": issue_description}
    try:
        result = graph.invoke(state)
        response = result.get("response", "No response generated")
        escalation = result.get("escalation", "No escalation specified")
        mermaid_code = graph.get_graph().draw_mermaid()
        return (
            response,
            escalation,
            "## Mermaid Graph",
            "Check out this [mermaid link](https://mermaid.live/) to display a graph with the following data",
            f"```mermaid\n{mermaid_code}\n```"
        )
    except Exception as e:
        # The interface declares five outputs, so the error path must also
        # yield five values — returning only two (as before) makes Gradio
        # itself raise on failure instead of showing the error message.
        return (
            f"Error occurred: {e}",
            "Unable to determine escalation",
            "## Mermaid Graph",
            "Graph unavailable due to the error above.",
            "",
        )
# Gradio Interface
# Five outputs: the reply, the escalation decision, and three markdown slots
# that together render the mermaid diagram of the agent graph.
iface = gr.Interface(
    fn=process_ticket,
    inputs=gr.Textbox(label="Describe the issue"),
    outputs=[
        gr.Textbox(label="Response"),
        gr.Textbox(label="Escalation Decision"),
        gr.Markdown(), # Placeholder for the "Mermaid Graph" heading
        gr.Markdown(), # Placeholder for the link text
        gr.Markdown(label="Mermaid Graph Visualization") # Mermaid visualization
    ],
    title="Ticket Handling System",
)
# Launch the web UI only when executed as a script (not on import).
if __name__ == "__main__":
    iface.launch()