pratikshahp committed on
Commit
fd8baf2
·
verified ·
1 Parent(s): 860236f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -64
app.py CHANGED
@@ -1,8 +1,8 @@
1
  import os
 
2
  from langgraph.graph import StateGraph, START
3
  from langgraph.types import Command
4
  from dotenv import load_dotenv
5
- from typing import Literal
6
  import gradio as gr
7
  from langchain_huggingface import HuggingFaceEndpoint
8
 
@@ -10,15 +10,7 @@ from langchain_huggingface import HuggingFaceEndpoint
10
  load_dotenv()
11
  HF_TOKEN = os.getenv("HF_TOKEN")
12
 
13
- # Define HuggingFaceEndpoint for urgency classification
14
- classifier = HuggingFaceEndpoint(
15
- repo_id="mistralai/Mistral-7B-Instruct-v0.3", # You can use any model fine-tuned for text classification
16
- huggingfacehub_api_token=HF_TOKEN.strip(),
17
- temperature=0.5,
18
- max_new_tokens=10, # For short classification outputs
19
- )
20
-
21
- # Define HuggingFaceEndpoint for response generation
22
  llm = HuggingFaceEndpoint(
23
  repo_id="mistralai/Mistral-7B-Instruct-v0.3",
24
  huggingfacehub_api_token=HF_TOKEN.strip(),
@@ -26,87 +18,84 @@ llm = HuggingFaceEndpoint(
26
  max_new_tokens=200,
27
  )
28
 
29
- # Define state
30
  class State(dict):
31
- issue: str
32
  priority: str
33
- response: str
34
  escalation_needed: bool
 
 
35
 
36
  # Create the graph
37
  builder = StateGraph(State)
38
 
39
- # Define nodes (agents)
 
 
40
  def ticket_creation_agent(state: State) -> Command[Literal["priority_classification_agent"]]:
41
- """Create a ticket based on customer issue."""
42
- return Command(update={"issue": state["issue"]}, goto="priority_classification_agent")
43
 
44
- def priority_classification_agent(state: State) -> Command[Literal["response_generation_agent"]]:
45
- """Classify ticket priority based on NLP analysis of the issue."""
46
- issue = state["issue"]
 
 
47
 
48
- # Use the model to classify urgency level
49
- prompt = f"Classify the urgency of this issue: {issue}"
50
- priority = classifier(prompt)
51
-
52
- # Use the classification result to assign priority
53
- if "urgent" in priority.lower():
54
- priority = "High"
55
- elif "critical" in priority.lower():
56
- priority = "Critical"
57
  else:
58
- priority = "Normal"
59
-
60
- return Command(update={"priority": priority}, goto="response_generation_agent")
61
 
62
- def response_generation_agent(state: State) -> Command[Literal["escalation_agent"]]:
63
- """Generate response based on issue and priority."""
64
- prompt = f"""
65
- Customer Issue: {state['issue']}
66
- Priority: {state['priority']}
67
-
68
- You are a customer service representative. Provide a detailed response to this issue based on its priority.
69
- """
70
- response = llm(prompt)
71
- return Command(update={"response": response, "escalation_needed": state['priority'] == "High"}, goto="escalation_agent")
72
 
73
- def escalation_agent(state: State):
74
- """Decide if the ticket needs to be escalated based on priority."""
75
- escalation = "Yes" if state["escalation_needed"] else "No"
76
- return {"response": state["response"], "escalation": escalation}
 
 
 
77
 
78
  # Add nodes to the graph
79
  builder.add_edge(START, "ticket_creation_agent")
80
  builder.add_node("ticket_creation_agent", ticket_creation_agent)
81
  builder.add_node("priority_classification_agent", priority_classification_agent)
82
- builder.add_node("response_generation_agent", response_generation_agent)
83
- builder.add_node("escalation_agent", escalation_agent)
84
 
85
  # Compile the graph
86
  graph = builder.compile()
87
 
88
- # Gradio Interface
89
- def process_ticket(issue: str):
90
- """Run the multi-agent customer support flow with user input."""
91
- state = {"issue": issue}
92
  result = graph.invoke(state)
93
 
94
- # Return the customer service response and escalation info
95
- return result["response"], result["escalation"]
96
 
 
97
  iface = gr.Interface(
98
  fn=process_ticket,
99
- inputs=[
100
- gr.Textbox(
101
- label="Enter Customer Issue",
102
- placeholder="Describe the issue that needs to be addressed...",
103
- ),
104
- ],
105
- outputs=[
106
- gr.Textbox(label="Generated Customer Service Response"),
107
- gr.Textbox(label="Escalation Status"),
108
- ],
109
- title="Customer Support Multi-Agent System",
110
  )
111
 
112
  iface.launch()
 
1
  import os
2
+ from typing import List, Dict, Literal
3
  from langgraph.graph import StateGraph, START
4
  from langgraph.types import Command
5
  from dotenv import load_dotenv
 
6
  import gradio as gr
7
  from langchain_huggingface import HuggingFaceEndpoint
8
 
 
10
# Load environment variables and read the Hugging Face API token.
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")

# Fail fast with a clear message: calling HF_TOKEN.strip() below would
# otherwise raise an opaque AttributeError when the variable is unset.
if not HF_TOKEN:
    raise RuntimeError("HF_TOKEN environment variable is not set")

# Define HuggingFaceEndpoint — the single LLM shared by all agents for
# classification and response generation.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    huggingfacehub_api_token=HF_TOKEN.strip(),
    max_new_tokens=200,  # enough for a short customer-support reply
)
20
 
21
# Define state class for managing the ticket state.
# NOTE(review): this is a plain dict subclass used as the LangGraph state
# schema; the annotations below only declare the expected keys and are not
# enforced at runtime.
class State(dict):
    # Raw customer issue text captured from the Gradio input.
    issue_description: str
    # Priority classification produced by the LLM.
    priority: str
    # Set by the escalation classification agent.
    escalation_needed: bool
    # Generated customer-facing reply.
    response: str
    # Human-readable escalation decision.
    escalation: str
28
 
29
# Create the graph builder; agent nodes and edges are registered below.
builder = StateGraph(State)
31
 
32
# Define the nodes for the multi-agent flow

# Ticket creation agent
def ticket_creation_agent(state: State) -> Command[Literal["priority_classification_agent"]]:
    """Record the incoming issue description and hand off to priority classification."""
    issue_text = state["issue_description"]
    return Command(
        update={"issue_description": issue_text},
        goto="priority_classification_agent",
    )
38
 
39
# Priority classification agent
def priority_classification_agent(state: State) -> Command[Literal["escalation_classification_agent"]]:
    """Classify priority based on the issue description.

    The LLM returns free-form text (e.g. "This issue is urgent."), so the raw
    output is normalized to one of the canonical labels — "urgent",
    "critical", or "normal" — before being stored. Downstream agents compare
    the stored priority against these labels.
    """
    prompt = f"Classify the following issue as urgent, critical, or normal: {state['issue_description']}"
    raw = llm.invoke(prompt)  # .invoke() is the supported call style

    # Map the free-form model output onto a canonical label; without this,
    # raw text would never satisfy an exact comparison against the labels.
    lowered = raw.lower()
    if "critical" in lowered:
        priority = "critical"
    elif "urgent" in lowered:
        priority = "urgent"
    else:
        priority = "normal"

    # Update the priority and proceed to escalation classification
    return Command(update={"priority": priority}, goto="escalation_classification_agent")
47
+
48
# Escalation classification agent
def escalation_classification_agent(state: State) -> Command[Literal["generate_response_agent"]]:
    """Classify whether escalation is needed based on priority.

    Uses substring matching because the stored priority may be raw LLM text
    (e.g. "This issue is urgent."), which would never pass an exact
    membership test against the bare labels "urgent"/"critical".
    """
    priority_text = state["priority"].lower()
    escalation_needed = any(label in priority_text for label in ("urgent", "critical"))

    return Command(update={"escalation_needed": escalation_needed}, goto="generate_response_agent")
57
+
58
# Generate response agent
def generate_response_agent(state: State) -> Dict[str, str]:
    """Generate response based on ticket priority and escalation need."""
    escalation = (
        "Escalate the issue to a senior team member immediately."
        if state["escalation_needed"]
        else "No escalation needed."
    )

    prompt = f"Generate a response for the following issue: {state['issue_description']}. The priority is {state['priority']}."
    response = llm.invoke(prompt)  # Use .invoke() instead of __call__

    return {"response": response, "escalation": escalation}
73
 
74
# Wire the agents into the graph: entry edge first, then register each node
# from a single table so the flow is visible at a glance.
builder.add_edge(START, "ticket_creation_agent")
for node_name, node_fn in (
    ("ticket_creation_agent", ticket_creation_agent),
    ("priority_classification_agent", priority_classification_agent),
    ("escalation_classification_agent", escalation_classification_agent),
    ("generate_response_agent", generate_response_agent),
):
    builder.add_node(node_name, node_fn)

# Compile the graph into a runnable state machine.
graph = builder.compile()
83
 
84
# Gradio callback: run a single ticket through the multi-agent flow.
def process_ticket(issue_description: str):
    """Process the issue ticket through the multi-agent flow."""
    initial_state = {"issue_description": issue_description}
    final_state = graph.invoke(initial_state)

    # Return the response and escalation result, with safe fallbacks in case
    # a key was never written into the state.
    response_text = final_state.get("response", "No response generated")
    escalation_text = final_state.get("escalation", "No escalation specified")
    return response_text, escalation_text
92
 
93
# Gradio Interface: one free-text input, two text outputs.
issue_input = gr.Textbox(label="Describe the issue")
result_outputs = [
    gr.Textbox(label="Response"),
    gr.Textbox(label="Escalation Decision"),
]

iface = gr.Interface(
    fn=process_ticket,
    inputs=issue_input,
    outputs=result_outputs,
    title="Ticket Handling System",
)

iface.launch()