JatsTheAIGen commited on
Commit
8818bfc
·
1 Parent(s): 810f6c8

Configure app for Hugging Face deployment

Browse files
Files changed (1) hide show
  1. graph.py +56 -82
graph.py CHANGED
@@ -1,4 +1,4 @@
1
- # graph.py (Updated with Triage Agent)
2
 
3
  import json
4
  import re
@@ -13,10 +13,10 @@ from logging_config import setup_logging, get_logger
13
  # --- Setup Logging & Constants ---
14
  setup_logging()
15
  log = get_logger(__name__)
16
- INITIAL_MAX_REWORK_CYCLES = 3
17
  GPT4O_INPUT_COST_PER_1K_TOKENS = 0.005
18
  GPT4O_OUTPUT_COST_PER_1K_TOKENS = 0.015
19
- AVG_TOKENS_PER_CALL = 2.0
20
 
21
  # --- Agent State Definition ---
22
  class AgentState(TypedDict):
@@ -33,6 +33,8 @@ class AgentState(TypedDict):
33
  execution_path: List[str]
34
  rework_cycles: int
35
  max_loops: int
 
 
36
 
37
  # --- Helper for Robust JSON Parsing ---
38
  def parse_json_from_llm(llm_output: str) -> Optional[dict]:
@@ -51,57 +53,48 @@ def parse_json_from_llm(llm_output: str) -> Optional[dict]:
51
  # --- LLM Initialization ---
52
  llm = ChatOpenAI(model="gpt-4o", temperature=0.1, max_retries=3, request_timeout=60)
53
 
54
- # --- NEW: Triage Agent ---
55
def run_triage_agent(state: AgentState):
    """Classify the user input as a plain greeting or a real task.

    Greetings short-circuit the workflow with a canned draft response;
    everything else falls through to the planner.
    """
    log.info("--- triage ---")
    prompt = f"Analyze the user input. Is it a simple conversational greeting (e.g., 'hello', 'hi', 'how are you?') that doesn't require a complex plan, or is it a task/question that needs to be decomposed and executed? Respond with 'greeting' or 'task'.\n\nUser Input: \"{state['userInput']}\""
    verdict = llm.invoke(prompt).content.lower()

    if 'greeting' not in verdict:
        # Not a greeting: leave draftResponse unset so the main graph runs.
        log.info("Triage result: Complex Task. Proceeding to planner.")
        return {"execution_path": ["Triage Agent"]}

    # Greeting: answer directly and bypass the expensive pipeline.
    log.info("Triage result: Simple Greeting. Bypassing main workflow.")
    return {
        "draftResponse": "Hello! How can I help you today?",
        "execution_path": ["Triage Agent"],
    }
71
-
72
def should_start_full_workflow(state: AgentState):
    """Conditional edge to decide if we run the full graph or end early."""
    # Triage having already produced a draft means there is nothing left to do.
    triage_answered = bool(state.get("draftResponse"))
    return "end_early" if triage_answered else "planner"
79
 
80
- # --- Agent Node Functions (Planner, PM, etc. are unchanged) ---
81
  def run_planner_agent(state: AgentState):
82
- log.info("--- ✈️ Running Planner Agent for Pre-flight Check ---")
83
  path = state.get('execution_path', []) + ["Planner Agent"]
84
- prompt = f"Analyze the user's request and provide a high-level plan and cost estimate. 1. Decompose the request into high-level steps. 2. Count the likely number of specialist LLM calls needed for ONE successful loop. User Request: \"{state['userInput']}\". Provide the output ONLY as a valid JSON object within ```json code fences with keys: 'plan' (list of strings), 'estimated_llm_calls_per_loop' (integer)."
85
-
86
  response = llm.invoke(prompt)
87
  plan_data = parse_json_from_llm(response.content)
88
 
89
  if not plan_data:
90
- return {"pmPlan": {"error": "Failed to create a valid plan."}, "execution_path": path}
91
 
92
  calls_per_loop = plan_data.get('estimated_llm_calls_per_loop', 3)
93
  cost_per_loop = (calls_per_loop * AVG_TOKENS_PER_CALL) * ( (GPT4O_INPUT_COST_PER_1K_TOKENS + GPT4O_OUTPUT_COST_PER_1K_TOKENS) / 2 )
94
  estimated_cost = cost_per_loop * (INITIAL_MAX_REWORK_CYCLES + 1)
95
-
96
  plan_data['max_loops_initial'] = INITIAL_MAX_REWORK_CYCLES
97
  plan_data['estimated_cost_usd'] = round(estimated_cost, 2)
98
  plan_data['cost_per_loop_usd'] = max(0.01, round(cost_per_loop, 3))
99
-
100
  log.info(f"Pre-flight Estimate: {plan_data}")
101
- return {"pmPlan": plan_data, "execution_path": path}
102
 
103
- # --- All other agent functions remain the same ---
104
- # ... (run_pm_agent, run_memory_retrieval, etc.)
105
  def run_memory_retrieval(state: AgentState):
106
  log.info("--- 🧠 Accessing Long-Term Memory ---")
107
  path = state.get('execution_path', []) + ["Memory Retriever"]
@@ -112,14 +105,14 @@ def run_memory_retrieval(state: AgentState):
112
  else:
113
  context = "No relevant memories found."
114
  log.info(context)
115
- return {"retrievedMemory": context, "execution_path": path}
116
 
117
  def run_intent_agent(state: AgentState):
118
  log.info("--- 🎯 Running Intent Agent ---")
119
  path = state.get('execution_path', []) + ["Intent Agent"]
120
- prompt = f"As an expert prompt engineer, analyze the user's request, conversation history, and retrieved memories. Refine it into a clear, actionable 'core objective prompt'.\n\nRelevant Memory:\n{state.get('retrievedMemory')}\n\nUser Request: \"{state.get('userInput')}\"\n\nCore Objective:"
121
  response = llm.invoke(prompt)
122
- return {"coreObjectivePrompt": response.content, "execution_path": path}
123
 
124
  def run_pm_agent(state: AgentState):
125
  log.info("--- 👷 Running PM Agent ---")
@@ -128,35 +121,36 @@ def run_pm_agent(state: AgentState):
128
 
129
  path = state.get('execution_path', []) + ["PM Agent"]
130
  feedback = f"QA Feedback (must be addressed): {state.get('qaFeedback')}" if state.get('qaFeedback') else ""
131
- prompt = f"As the Project Manager, decompose the core objective into a plan. Identify if code execution is needed. If so, define the goal of the experiment.\n\nCore Objective: {state.get('coreObjectivePrompt')}\n\n{feedback}\n\nProvide your plan ONLY as a valid JSON object within ```json code fences with keys: 'plan_steps' (list of strings) and 'experiment_needed' (boolean). If 'experiment_needed' is true, also include 'experiment_goal' (string)."
132
-
133
  response = llm.invoke(prompt)
134
  plan = parse_json_from_llm(response.content)
135
 
136
  if not plan:
137
  log.error("PM Agent failed to produce a valid JSON plan.")
138
  plan = {"plan_steps": ["Error: The Project Manager failed to create a valid plan."], "experiment_needed": False}
139
-
140
  log.info(f"Generated Plan: Experiment Needed = {plan.get('experiment_needed', False)}")
141
- return {"pmPlan": plan, "execution_path": path, "rework_cycles": current_cycles}
142
 
143
  def run_experimenter_agent(state: AgentState):
144
  log.info("--- 🔬 Running Experimenter Agent ---")
145
  path = state.get('execution_path', []) + ["Experimenter Agent"]
146
  if not state.get('pmPlan', {}).get('experiment_needed'):
147
- return {"experimentCode": None, "experimentResults": None, "execution_path": path}
148
-
149
  goal = state.get('pmPlan', {}).get('experiment_goal', 'No goal specified.')
150
- prompt = f"Write a Python script to achieve this goal. Do not require user input. Print results to standard output.\n\nGoal: {goal}\n\nPython Code:\n```python\n# Your code here\n```"
151
  response = llm.invoke(prompt)
152
  code_match = re.search(r"```python\n(.*?)\n```", response.content, re.DOTALL)
153
  if not code_match:
154
- log.error("Experimenter failed to generate valid Python code within fences.")
155
- return {"experimentCode": "# ERROR: No code generated", "experimentResults": {"success": False, "stderr": "No valid Python code block was generated."}, "execution_path": path}
156
-
 
157
  code = code_match.group(1).strip()
158
  results = execute_python_code(code)
159
- return {"experimentCode": code, "experimentResults": results, "execution_path": path}
160
 
161
  def run_synthesis_agent(state: AgentState):
162
  log.info("--- ���️ Running Synthesis Agent ---")
@@ -164,51 +158,45 @@ def run_synthesis_agent(state: AgentState):
164
  exp_results = state.get('experimentResults')
165
  results_summary = "No experiment was conducted."
166
  if exp_results:
167
- results_summary = f"An experiment was conducted. Output:\nSTDOUT:\n{exp_results.get('stdout', '')}\nSTDERR:\n{exp_results.get('stderr', '')}"
168
-
169
  prompt = f"Synthesize all information into a final response.\n\nCore Objective: {state.get('coreObjectivePrompt')}\n\nPlan: {state.get('pmPlan', {}).get('plan_steps')}\n\n{results_summary}\n\nFinal Response:"
170
  response = llm.invoke(prompt)
171
- return {"draftResponse": response.content, "execution_path": path}
172
 
173
  def run_qa_agent(state: AgentState):
174
  log.info("--- ✅ Running QA Agent ---")
175
  path = state.get('execution_path', []) + ["QA Agent"]
176
- prompt = f"Review the draft response based on the core objective. Respond ONLY with 'APPROVED' if it is complete and accurate. Otherwise, provide concise feedback for rework.\n\nCore Objective: {state.get('coreObjectivePrompt')}\n\nDraft Response: {state.get('draftResponse')}"
177
  response = llm.invoke(prompt)
178
  if "APPROVED" in response.content.upper():
179
- return {"approved": True, "qaFeedback": None, "execution_path": path}
180
  else:
181
- return {"approved": False, "qaFeedback": response.content or "No specific feedback provided.", "execution_path": path}
182
 
183
  def run_archivist_agent(state: AgentState):
184
  log.info("--- 💾 Running Archivist Agent ---")
185
  path = state.get('execution_path', []) + ["Archivist Agent"]
186
- summary_prompt = f"Create a concise summary of this successful task for long-term memory. Focus on key insights and results.\n\nCore Objective: {state.get('coreObjectivePrompt')}\n\nFinal Response: {state.get('draftResponse')}\n\nMemory Summary:"
187
-
188
- # FIX: Use the correct variable name 'summary_prompt' instead of 'prompt'
189
  response = llm.invoke(summary_prompt)
190
-
191
  memory_manager.add_to_memory(response.content, {"objective": state.get('coreObjectivePrompt')})
192
- return {"execution_path": path}
193
 
194
  def run_disclaimer_agent(state: AgentState):
195
  log.warning("--- ⚠️ Running Disclaimer Agent ---")
196
  path = state.get('execution_path', []) + ["Disclaimer Agent"]
197
- disclaimer = "**DISCLAIMER: The process was stopped after exhausting the user-defined budget. The following response is the best available draft but may be incomplete, unverified, or contain errors. Please review it carefully.**\n\n---\n\n"
198
- final_response = disclaimer + state.get('draftResponse', "No response was generated before the budget was exhausted.")
199
- return {"draftResponse": final_response, "execution_path": path}
200
-
201
 
202
  # --- Conditional Edges & Graph Definition ---
203
- # ... (should_continue and should_run_experiment are unchanged)
204
  def should_continue(state: AgentState):
205
  log.info("--- 🤔 Decision: Is the response QA approved? ---")
206
  if state.get("approved"):
207
  log.info("Routing to: Archivist (Success Path)")
208
  return "archivist_agent"
209
-
210
  if state.get("rework_cycles", 0) > state.get("max_loops", 0):
211
- log.error(f"BUDGET LIMIT REACHED. Aborting task after {state.get('rework_cycles', 0)-1} rework cycles.")
212
  return "disclaimer_agent"
213
  else:
214
  log.info("Routing to: PM Agent for rework")
@@ -218,36 +206,22 @@ def should_run_experiment(state: AgentState):
218
  decision = "experimenter_agent" if state.get('pmPlan', {}).get('experiment_needed') else "synthesis_agent"
219
  return decision
220
 
221
- # --- Build the Graph (Updated with Triage) ---
222
- workflow = StateGraph(AgentState)
223
- workflow.add_node("triage", run_triage_agent)
224
- workflow.add_node("memory_retriever", run_memory_retrieval)
225
- # ... add all other nodes
226
- workflow.add_node("intent_agent", run_intent_agent)
227
- workflow.add_node("pm_agent", run_pm_agent)
228
- workflow.add_node("experimenter_agent", run_experimenter_agent)
229
- workflow.add_node("synthesis_agent", run_synthesis_agent)
230
- workflow.add_node("qa_agent", run_qa_agent)
231
- workflow.add_node("archivist_agent", run_archivist_agent)
232
- workflow.add_node("disclaimer_agent", run_disclaimer_agent)
233
-
234
- # NEW Triage Logic
235
- # We now have two separate graphs that are conditionally called by the app
236
- # 1. A simple Triage graph
237
  triage_workflow = StateGraph(AgentState)
238
  triage_workflow.add_node("triage", run_triage_agent)
239
  triage_workflow.set_entry_point("triage")
 
240
  triage_app = triage_workflow.compile()
241
 
242
-
243
- # 2. Planner-only graph
244
  planner_workflow = StateGraph(AgentState)
245
  planner_workflow.add_node("planner", run_planner_agent)
246
  planner_workflow.set_entry_point("planner")
 
247
  planner_app = planner_workflow.compile()
248
 
249
-
250
- # 3. Full execution graph
251
  main_workflow = StateGraph(AgentState)
252
  main_workflow.add_node("memory_retriever", run_memory_retrieval)
253
  main_workflow.add_node("intent_agent", run_intent_agent)
 
1
+ # graph.py (Updated with User-Facing Status Updates)
2
 
3
  import json
4
  import re
 
13
  # --- Setup Logging & Constants ---
14
  setup_logging()
15
  log = get_logger(__name__)
16
+ INITIAL_MAX_REWORK_CYCLES = 3
17
  GPT4O_INPUT_COST_PER_1K_TOKENS = 0.005
18
  GPT4O_OUTPUT_COST_PER_1K_TOKENS = 0.015
19
+ AVG_TOKENS_PER_CALL = 2.0
20
 
21
  # --- Agent State Definition ---
22
  class AgentState(TypedDict):
 
33
  execution_path: List[str]
34
  rework_cycles: int
35
  max_loops: int
36
+ # NEW: Add a field for user-facing status updates
37
+ status_update: str
38
 
39
  # --- Helper for Robust JSON Parsing ---
40
  def parse_json_from_llm(llm_output: str) -> Optional[dict]:
 
53
  # --- LLM Initialization ---
54
  llm = ChatOpenAI(model="gpt-4o", temperature=0.1, max_retries=3, request_timeout=60)
55
 
56
+ # --- Agent Node Functions (Updated with Status Messages) ---
57
def run_triage_agent(state: AgentState):
    """Classify the user input as a greeting or a task and report status.

    Returns a partial AgentState update: greetings get an immediate
    draftResponse; tasks only record the execution path and a status line.
    """
    log.info("--- triage ---")
    prompt = f"Analyze the user input. Is it a simple conversational greeting or a task? Respond with 'greeting' or 'task'.\n\nUser Input: \"{state['userInput']}\""
    response = llm.invoke(prompt)

    # BUGFIX: a bare substring test ('greeting' in ...) also matched replies
    # such as "this is not a greeting". The prompt asks for a one-word verdict,
    # so require the reply to *start* with it instead.
    verdict = response.content.strip().lower()
    if verdict.startswith('greeting'):
        log.info("Triage result: Simple Greeting.")
        return {
            "draftResponse": "Hello! How can I help you today?",
            "execution_path": ["Triage Agent"],
            "status_update": "Responding to greeting."
        }
    else:
        log.info("Triage result: Complex Task.")
        return {
            "execution_path": ["Triage Agent"],
            "status_update": "Request requires a plan. Proceeding..."
        }
 
 
 
 
 
 
75
 
 
76
def run_planner_agent(state: AgentState):
    """Pre-flight check: draft a high-level plan and a rough USD cost estimate.

    Returns a partial AgentState update with 'pmPlan' (plan + cost fields),
    the extended execution path, and a user-facing status message.
    """
    log.info("--- ✈️ Running Planner Agent ---")
    path = state.get('execution_path', []) + ["Planner Agent"]
    prompt = f"Analyze the user's request. Provide a high-level plan and estimate the number of LLM calls for one loop. User Request: \"{state['userInput']}\". Respond in JSON with keys: 'plan' (list of strings), 'estimated_llm_calls_per_loop' (integer)."

    plan_data = parse_json_from_llm(llm.invoke(prompt).content)
    if not plan_data:
        return {"pmPlan": {"error": "Failed to create a valid plan."}, "execution_path": path, "status_update": "Error: Could not create a plan."}

    # Rough estimate: average of input/output per-1K-token prices, scaled by
    # the expected call count per loop and the allowed number of loops.
    calls_per_loop = plan_data.get('estimated_llm_calls_per_loop', 3)
    avg_price = (GPT4O_INPUT_COST_PER_1K_TOKENS + GPT4O_OUTPUT_COST_PER_1K_TOKENS) / 2
    cost_per_loop = calls_per_loop * AVG_TOKENS_PER_CALL * avg_price
    estimated_cost = cost_per_loop * (INITIAL_MAX_REWORK_CYCLES + 1)

    plan_data['max_loops_initial'] = INITIAL_MAX_REWORK_CYCLES
    plan_data['estimated_cost_usd'] = round(estimated_cost, 2)
    plan_data['cost_per_loop_usd'] = max(0.01, round(cost_per_loop, 3))

    log.info(f"Pre-flight Estimate: {plan_data}")
    return {"pmPlan": plan_data, "execution_path": path, "status_update": "Plan and cost estimate created. Awaiting approval."}
97
 
 
 
98
  def run_memory_retrieval(state: AgentState):
99
  log.info("--- 🧠 Accessing Long-Term Memory ---")
100
  path = state.get('execution_path', []) + ["Memory Retriever"]
 
105
  else:
106
  context = "No relevant memories found."
107
  log.info(context)
108
+ return {"retrievedMemory": context, "execution_path": path, "status_update": "Searching for relevant past information..."}
109
 
110
def run_intent_agent(state: AgentState):
    """Distill the raw request (plus retrieved memory) into a core objective."""
    log.info("--- 🎯 Running Intent Agent ---")
    path = state.get('execution_path', []) + ["Intent Agent"]
    memory = state.get('retrievedMemory')
    request = state.get('userInput')
    prompt = f"Refine the user's request into a clear, actionable 'core objective prompt'.\n\nRelevant Memory:\n{memory}\n\nUser Request: \"{request}\"\n\nCore Objective:"
    objective = llm.invoke(prompt).content
    return {"coreObjectivePrompt": objective, "execution_path": path, "status_update": "Clarifying the main objective..."}
116
 
117
  def run_pm_agent(state: AgentState):
118
  log.info("--- 👷 Running PM Agent ---")
 
121
 
122
  path = state.get('execution_path', []) + ["PM Agent"]
123
  feedback = f"QA Feedback (must be addressed): {state.get('qaFeedback')}" if state.get('qaFeedback') else ""
124
+ prompt = f"Decompose the core objective into a plan. Determine if code execution is needed and define the goal.\n\nCore Objective: {state.get('coreObjectivePrompt')}\n\n{feedback}\n\nRespond in JSON with keys: 'plan_steps' (list), 'experiment_needed' (bool), and 'experiment_goal' (str if needed)."
125
+
126
  response = llm.invoke(prompt)
127
  plan = parse_json_from_llm(response.content)
128
 
129
  if not plan:
130
  log.error("PM Agent failed to produce a valid JSON plan.")
131
  plan = {"plan_steps": ["Error: The Project Manager failed to create a valid plan."], "experiment_needed": False}
132
+
133
  log.info(f"Generated Plan: Experiment Needed = {plan.get('experiment_needed', False)}")
134
+ return {"pmPlan": plan, "execution_path": path, "rework_cycles": current_cycles, "status_update": "Breaking down the objective into a detailed plan..."}
135
 
136
def run_experimenter_agent(state: AgentState):
    """Generate and execute a Python experiment when the PM plan requires one.

    Returns a partial AgentState update with the generated code, the execution
    results from execute_python_code, the path, and a status message. When no
    experiment is needed, code/results are None.
    """
    log.info("--- 🔬 Running Experimenter Agent ---")
    path = state.get('execution_path', []) + ["Experimenter Agent"]
    if not state.get('pmPlan', {}).get('experiment_needed'):
        return {"experimentCode": None, "experimentResults": None, "execution_path": path, "status_update": "Proceeding without a code experiment."}

    goal = state.get('pmPlan', {}).get('experiment_goal', 'No goal specified.')
    prompt = f"Write a Python script to achieve this goal: {goal}. Print results to standard output."
    response = llm.invoke(prompt)
    # BUGFIX: the prompt no longer demands a ```python fence, but the old regex
    # required exactly "```python\n...\n```" and failed on "```\n..." or extra
    # whitespace. Accept a fence with an optional language tag instead.
    code_match = re.search(r"```(?:python)?\s*\n(.*?)```", response.content, re.DOTALL)
    if not code_match:
        log.error("Experimenter failed to generate valid Python code.")
        results = {"success": False, "stderr": "No valid Python code block was generated."}
        return {"experimentCode": "# ERROR: No code generated", "experimentResults": results, "execution_path": path, "status_update": "Error: Failed to write necessary code."}

    code = code_match.group(1).strip()
    results = execute_python_code(code)
    return {"experimentCode": code, "experimentResults": results, "execution_path": path, "status_update": "Running code to find a solution..."}
154
 
155
  def run_synthesis_agent(state: AgentState):
156
  log.info("--- ���️ Running Synthesis Agent ---")
 
158
  exp_results = state.get('experimentResults')
159
  results_summary = "No experiment was conducted."
160
  if exp_results:
161
+ results_summary = f"Experiment Output:\nSTDOUT:\n{exp_results.get('stdout', '')}\nSTDERR:\n{exp_results.get('stderr', '')}"
162
+
163
  prompt = f"Synthesize all information into a final response.\n\nCore Objective: {state.get('coreObjectivePrompt')}\n\nPlan: {state.get('pmPlan', {}).get('plan_steps')}\n\n{results_summary}\n\nFinal Response:"
164
  response = llm.invoke(prompt)
165
+ return {"draftResponse": response.content, "execution_path": path, "status_update": "Putting together the final response..."}
166
 
167
def run_qa_agent(state: AgentState):
    """QA gate: approve the draft response or capture feedback for rework."""
    log.info("--- ✅ Running QA Agent ---")
    path = state.get('execution_path', []) + ["QA Agent"]
    prompt = f"Review the draft response based on the core objective. Respond ONLY with 'APPROVED' or provide concise feedback for rework.\n\nCore Objective: {state.get('coreObjectivePrompt')}\n\nDraft: {state.get('draftResponse')}"
    response = llm.invoke(prompt)
    # BUGFIX: the substring test ("APPROVED" in ...) also matched rejection
    # feedback like "NOT APPROVED because ...", wrongly approving bad drafts.
    # The prompt asks for 'APPROVED' alone, so require it to lead the reply.
    if response.content.strip().upper().startswith("APPROVED"):
        return {"approved": True, "qaFeedback": None, "execution_path": path, "status_update": "Response approved!"}
    else:
        return {"approved": False, "qaFeedback": response.content or "No specific feedback.", "execution_path": path, "status_update": "Response needs improvement. Reworking..."}
176
 
177
def run_archivist_agent(state: AgentState):
    """Summarize the completed task and persist it to long-term memory."""
    log.info("--- 💾 Running Archivist Agent ---")
    path = state.get('execution_path', []) + ["Archivist Agent"]
    objective = state.get('coreObjectivePrompt')
    summary_prompt = f"Create a concise summary of this successful task for long-term memory.\n\nCore Objective: {objective}\n\nFinal Response: {state.get('draftResponse')}\n\nMemory Summary:"
    summary = llm.invoke(summary_prompt)
    # Store the summary keyed by the objective so later runs can retrieve it.
    memory_manager.add_to_memory(summary.content, {"objective": objective})
    return {"execution_path": path, "status_update": "Saving key learnings for future reference..."}
184
 
185
def run_disclaimer_agent(state: AgentState):
    """Prefix the best available draft with a budget-exhausted disclaimer."""
    log.warning("--- ⚠️ Running Disclaimer Agent ---")
    path = state.get('execution_path', []) + ["Disclaimer Agent"]
    disclaimer = "**DISCLAIMER: The process was stopped after exhausting the budget. The following response is the best available draft and may be incomplete.**\n\n---\n\n"
    draft = state.get('draftResponse', "No response was generated.")
    return {"draftResponse": disclaimer + draft, "execution_path": path, "status_update": "Budget limit reached. Preparing final draft..."}
 
191
 
192
  # --- Conditional Edges & Graph Definition ---
 
193
  def should_continue(state: AgentState):
194
  log.info("--- 🤔 Decision: Is the response QA approved? ---")
195
  if state.get("approved"):
196
  log.info("Routing to: Archivist (Success Path)")
197
  return "archivist_agent"
 
198
  if state.get("rework_cycles", 0) > state.get("max_loops", 0):
199
+ log.error(f"BUDGET LIMIT REACHED after {state.get('rework_cycles', 0)-1} cycles.")
200
  return "disclaimer_agent"
201
  else:
202
  log.info("Routing to: PM Agent for rework")
 
206
  decision = "experimenter_agent" if state.get('pmPlan', {}).get('experiment_needed') else "synthesis_agent"
207
  return decision
208
 
209
+ # --- Build the Graphs ---
210
+ # 1. Triage Graph
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  triage_workflow = StateGraph(AgentState)
212
  triage_workflow.add_node("triage", run_triage_agent)
213
  triage_workflow.set_entry_point("triage")
214
+ triage_workflow.add_edge("triage", END)
215
  triage_app = triage_workflow.compile()
216
 
217
+ # 2. Planner-only Graph
 
218
  planner_workflow = StateGraph(AgentState)
219
  planner_workflow.add_node("planner", run_planner_agent)
220
  planner_workflow.set_entry_point("planner")
221
+ planner_workflow.add_edge("planner", END)
222
  planner_app = planner_workflow.compile()
223
 
224
+ # 3. Full Execution Graph
 
225
  main_workflow = StateGraph(AgentState)
226
  main_workflow.add_node("memory_retriever", run_memory_retrieval)
227
  main_workflow.add_node("intent_agent", run_intent_agent)