Ghisalbertifederico committed on
Commit
ccd2ac6
·
verified ·
1 Parent(s): 2ea61e0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -6
app.py CHANGED
@@ -240,19 +240,33 @@ def synthesize_response(state: AgentState) -> AgentState:
240
  if state.get("answer"):
241
  print(f"[SYNTHESIZE] skipped — answer already set by tool")
242
  return state
243
- prompt = [
244
- SystemMessage(content=get_prompt("final_llm_system")),
 
 
245
  HumanMessage(
246
  content=get_prompt(
247
- prompt_key="final_llm_user",
248
  question=state["question"],
249
  context=state["context"],
250
  )
251
  ),
252
  ]
253
- raw = _llm_answer.invoke(prompt).content.strip()
254
- print(f"\n[REASONING]\n{raw}\n")
255
- state["answer"] = raw
 
 
 
 
 
 
 
 
 
 
 
 
256
  return state
257
 
258
  def format_output(state: AgentState) -> AgentState:
 
240
  if state.get("answer"):
241
  print(f"[SYNTHESIZE] skipped — answer already set by tool")
242
  return state
243
+
244
+ # Pass 1: chain-of-thought reasoning
245
+ reasoning_prompt = [
246
+ SystemMessage(content=get_prompt("reasoning_system")),
247
  HumanMessage(
248
  content=get_prompt(
249
+ prompt_key="reasoning_user",
250
  question=state["question"],
251
  context=state["context"],
252
  )
253
  ),
254
  ]
255
+ reasoning = _llm_answer.invoke(reasoning_prompt).content.strip()
256
+ print(f"\n[REASONING]\n{reasoning}\n")
257
+
258
+ # Pass 2: extract the final answer cleanly
259
+ extract_prompt = [
260
+ SystemMessage(content=get_prompt("extract_system")),
261
+ HumanMessage(
262
+ content=get_prompt(
263
+ prompt_key="extract_user",
264
+ reasoning=reasoning,
265
+ )
266
+ ),
267
+ ]
268
+ answer = _llm_answer.invoke(extract_prompt).content.strip()
269
+ state["answer"] = answer
270
  return state
271
 
272
  def format_output(state: AgentState) -> AgentState: