AlessandroMasala committed on
Commit
05d645d
·
verified ·
1 Parent(s): adcaed9

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +6 -7
agent.py CHANGED
@@ -11,7 +11,6 @@ from dotenv import load_dotenv
11
  from langgraph.graph import StateGraph, END
12
  from langchain.tools import Tool as LangTool
13
  from langchain_core.runnables import RunnableLambda
14
- from pathlib import Path
15
 
16
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
17
  from langchain_community.llms import HuggingFacePipeline
@@ -185,7 +184,7 @@ def analyze_question(state: AgentState) -> AgentState:
185
  """
186
 
187
  try:
188
- response = llm.invoke(analysis_prompt).content
189
  state["conversation_history"].append({"role": "analysis", "content": response})
190
  state["current_step"] = AgentStep.SELECT_TOOLS.value
191
  except Exception as e:
@@ -264,7 +263,7 @@ def select_tools(state: AgentState) -> AgentState:
264
  A:
265
  """
266
 
267
- llm_tools = ast.literal_eval(llm.invoke(tools_prompt).content.strip())
268
  if not isinstance(llm_tools, list):
269
  llm_tools = []
270
  print(f"LLM suggested tools: {llm_tools}")
@@ -378,7 +377,7 @@ def synthesize_answer(state: AgentState) -> AgentState:
378
 
379
  Your step-by-step analysis:"""
380
 
381
- cot_response = llm.invoke(cot_prompt).content
382
 
383
  final_answer_prompt = f"""You are a precise assistant tasked with deriving the **final answer** from the step-by-step analysis below.
384
 
@@ -399,7 +398,7 @@ def synthesize_answer(state: AgentState) -> AgentState:
399
 
400
 
401
  try:
402
- response = llm.invoke(final_answer_prompt).content
403
  state["final_answer"] = response
404
  state["current_step"] = AgentStep.COMPLETE.value
405
  except Exception as e:
@@ -424,7 +423,7 @@ def error_recovery(state: AgentState) -> AgentState:
424
  Provide a helpful response even if you cannot access external tools.
425
  Be clear about any limitations in your answer.
426
  """
427
- response = llm.invoke(fallback_prompt).content
428
  state["final_answer"] = f"Using available knowledge (some tools unavailable): {response}"
429
  state["current_step"] = AgentStep.COMPLETE.value
430
  except Exception as e:
@@ -543,7 +542,7 @@ class GaiaAgent:
543
 
544
  # Try fallback direct LLM response
545
  try:
546
- fallback_response = llm.invoke(f"Please answer this question: {question}").content
547
  return f"Fallback response: {fallback_response}"
548
  except:
549
  return error_msg
 
11
  from langgraph.graph import StateGraph, END
12
  from langchain.tools import Tool as LangTool
13
  from langchain_core.runnables import RunnableLambda
 
14
 
15
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
16
  from langchain_community.llms import HuggingFacePipeline
 
184
  """
185
 
186
  try:
187
+ response = llm.invoke(analysis_prompt)
188
  state["conversation_history"].append({"role": "analysis", "content": response})
189
  state["current_step"] = AgentStep.SELECT_TOOLS.value
190
  except Exception as e:
 
263
  A:
264
  """
265
 
266
+ llm_tools = ast.literal_eval(llm.invoke(tools_prompt).strip())
267
  if not isinstance(llm_tools, list):
268
  llm_tools = []
269
  print(f"LLM suggested tools: {llm_tools}")
 
377
 
378
  Your step-by-step analysis:"""
379
 
380
+ cot_response = llm.invoke(cot_prompt)
381
 
382
  final_answer_prompt = f"""You are a precise assistant tasked with deriving the **final answer** from the step-by-step analysis below.
383
 
 
398
 
399
 
400
  try:
401
+ response = llm.invoke(final_answer_prompt)
402
  state["final_answer"] = response
403
  state["current_step"] = AgentStep.COMPLETE.value
404
  except Exception as e:
 
423
  Provide a helpful response even if you cannot access external tools.
424
  Be clear about any limitations in your answer.
425
  """
426
+ response = llm.invoke(fallback_prompt)
427
  state["final_answer"] = f"Using available knowledge (some tools unavailable): {response}"
428
  state["current_step"] = AgentStep.COMPLETE.value
429
  except Exception as e:
 
542
 
543
  # Try fallback direct LLM response
544
  try:
545
+ fallback_response = llm.invoke(f"Please answer this question: {question}")
546
  return f"Fallback response: {fallback_response}"
547
  except:
548
  return error_msg