pratikshahp committed on
Commit
9b6ed74
·
verified ·
1 Parent(s): fd8baf2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -23
app.py CHANGED
@@ -1,5 +1,5 @@
1
  import os
2
- from typing import List, Dict, Literal
3
  from langgraph.graph import StateGraph, START
4
  from langgraph.types import Command
5
  from dotenv import load_dotenv
@@ -40,7 +40,7 @@ def ticket_creation_agent(state: State) -> Command[Literal["priority_classificat
40
  def priority_classification_agent(state: State) -> Command[Literal["escalation_classification_agent"]]:
41
  """Classify priority based on the issue description."""
42
  prompt = f"Classify the following issue as urgent, critical, or normal: {state['issue_description']}"
43
- priority = llm.invoke(prompt) # Use .invoke() instead of __call__
44
 
45
  # Update the priority and proceed to escalation classification
46
  return Command(update={"priority": priority}, goto="escalation_classification_agent")
@@ -48,28 +48,17 @@ def priority_classification_agent(state: State) -> Command[Literal["escalation_c
48
  # Escalation classification agent
49
  def escalation_classification_agent(state: State) -> Command[Literal["generate_response_agent"]]:
50
  """Classify whether escalation is needed based on priority."""
51
- if state["priority"].lower() in ["urgent", "critical"]:
52
- escalation_needed = True
53
- else:
54
- escalation_needed = False
55
-
56
  return Command(update={"escalation_needed": escalation_needed}, goto="generate_response_agent")
57
 
58
  # Generate response agent
59
  def generate_response_agent(state: State) -> Dict[str, str]:
60
  """Generate response based on ticket priority and escalation need."""
61
- if state["escalation_needed"]:
62
- escalation = "Escalate the issue to a senior team member immediately."
63
- else:
64
- escalation = "No escalation needed."
65
-
66
  prompt = f"Generate a response for the following issue: {state['issue_description']}. The priority is {state['priority']}."
67
- response = llm.invoke(prompt) # Use .invoke() instead of __call__
68
 
69
- return {
70
- "response": response,
71
- "escalation": escalation
72
- }
73
 
74
  # Add nodes to the graph
75
  builder.add_edge(START, "ticket_creation_agent")
@@ -85,17 +74,24 @@ graph = builder.compile()
85
  def process_ticket(issue_description: str):
86
  """Process the issue ticket through the multi-agent flow."""
87
  state = {"issue_description": issue_description}
88
- result = graph.invoke(state)
89
-
90
- # Return the response and escalation result
91
- return result.get("response", "No response generated"), result.get("escalation", "No escalation specified")
 
 
 
92
 
93
  # Gradio Interface
94
  iface = gr.Interface(
95
  fn=process_ticket,
96
  inputs=gr.Textbox(label="Describe the issue"),
97
- outputs=[gr.Textbox(label="Response"), gr.Textbox(label="Escalation Decision")],
 
 
 
98
  title="Ticket Handling System",
99
  )
100
 
101
- iface.launch()
 
 
1
  import os
2
+ from typing import Dict, Literal
3
  from langgraph.graph import StateGraph, START
4
  from langgraph.types import Command
5
  from dotenv import load_dotenv
 
40
def priority_classification_agent(state: State) -> Command[Literal["escalation_classification_agent"]]:
    """Label the ticket's priority from its issue description.

    Asks the LLM to tag the issue as urgent, critical, or normal, stores the
    trimmed label under ``priority``, and routes to the escalation classifier.
    """
    classification_prompt = (
        f"Classify the following issue as urgent, critical, or normal: {state['issue_description']}"
    )
    # .invoke() is the supported call interface; strip stray whitespace from the label.
    label = llm.invoke(classification_prompt).strip()
    return Command(
        update={"priority": label},
        goto="escalation_classification_agent",
    )
 
48
  # Escalation classification agent
49
def escalation_classification_agent(state: State) -> Command[Literal["generate_response_agent"]]:
    """Decide whether the ticket needs escalation.

    Escalation is required exactly when the (case-insensitive) priority is
    "urgent" or "critical"; control then passes to response generation.
    """
    needs_escalation = state["priority"].lower() in ("urgent", "critical")
    return Command(
        update={"escalation_needed": needs_escalation},
        goto="generate_response_agent",
    )
53
 
54
  # Generate response agent
55
def generate_response_agent(state: State) -> Dict[str, str]:
    """Produce the user-facing reply plus an escalation instruction.

    Returns a dict with ``response`` (LLM-generated text for the issue) and
    ``escalation`` (a fixed instruction chosen from the escalation flag).
    """
    if state["escalation_needed"]:
        escalation = "Escalate the issue to a senior team member immediately."
    else:
        escalation = "No escalation needed."

    prompt = f"Generate a response for the following issue: {state['issue_description']}. The priority is {state['priority']}."
    # .invoke() and strip keep the reply text clean of leading/trailing whitespace.
    response = llm.invoke(prompt).strip()

    return {
        "response": response,
        "escalation": escalation,
    }
 
 
 
62
 
63
  # Add nodes to the graph
64
  builder.add_edge(START, "ticket_creation_agent")
 
74
def process_ticket(issue_description: str):
    """Run one ticket through the multi-agent graph.

    Returns a ``(response, escalation)`` pair. Any failure is surfaced in the
    response slot instead of being raised, so the UI never crashes.
    """
    initial_state = {"issue_description": issue_description}
    try:
        final_state = graph.invoke(initial_state)
        return (
            final_state.get("response", "No response generated"),
            final_state.get("escalation", "No escalation specified"),
        )
    except Exception as exc:  # UI boundary: report the error, don't propagate
        return f"Error occurred: {exc}", "Unable to determine escalation"
84
 
85
# Gradio UI: one free-text input, two text outputs (reply + escalation verdict).
_response_box = gr.Textbox(label="Response")
_escalation_box = gr.Textbox(label="Escalation Decision")

iface = gr.Interface(
    fn=process_ticket,
    inputs=gr.Textbox(label="Describe the issue"),
    outputs=[_response_box, _escalation_box],
    title="Ticket Handling System",
)

# Launch only when executed as a script, not when imported.
if __name__ == "__main__":
    iface.launch()