D3MI4N committed on
Commit
9d3db05
·
1 Parent(s): 588762b

debugging

Browse files
Files changed (2) hide show
  1. gaia_graph.py +15 -17
  2. requirements.txt +1 -0
gaia_graph.py CHANGED
@@ -9,10 +9,10 @@ from dotenv import load_dotenv
9
  from langchain.tools import Tool
10
  from langchain.agents import initialize_agent, AgentType
11
  from langchain_openai import ChatOpenAI
12
- from langgraph.graph import StateGraph
13
 
 
14
  from tenacity import retry, wait_exponential, stop_after_attempt, retry_if_exception_type
15
- import openai.error
16
 
17
  # ─── Load Environment Variables ──────────────────────────────────────────────
18
  load_dotenv()
@@ -75,10 +75,10 @@ search_tool = Tool(
75
  description="Useful for answering factual questions using a search engine.",
76
  )
77
 
78
- # ─── Create LLM Agent with retry ──────────────────────────────────────────────
79
  llm = ChatOpenAI(
80
  temperature=0.0,
81
- model="gpt-4o-mini", # switched to gpt-4o-mini for higher token limits
82
  openai_api_key=OPENAI_API_KEY
83
  )
84
 
@@ -90,14 +90,6 @@ agent_executor = initialize_agent(
90
  handle_parsing_errors=True,
91
  )
92
 
93
- @retry(
94
- wait=wait_exponential(multiplier=1, min=1, max=10),
95
- stop=stop_after_attempt(5),
96
- retry=retry_if_exception_type(openai.error.RateLimitError)
97
- )
98
- def run_agent_with_retry(prompt: str) -> str:
99
- return agent_executor.run(prompt)
100
-
101
  # ─── Clean Output ────────────────────────────────────────────────────────────
102
  def clean_answer(ans: str) -> str:
103
  if "```" in ans:
@@ -113,12 +105,18 @@ class GaiaState(TypedDict):
113
  question: str
114
  answer: str
115
 
116
- # ─── Define Node Function with retry ──────────────────────────────────────────
 
 
 
 
 
 
 
 
 
117
  def agent_node(state: GaiaState) -> GaiaState:
118
- try:
119
- raw = run_agent_with_retry(state["question"])
120
- except openai.error.RateLimitError:
121
- raw = "Error: rate limit exceeded after retries."
122
  return {"question": state["question"], "answer": clean_answer(raw)}
123
 
124
  # ─── Build LangGraph ─────────────────────────────────────────────────────────
 
9
  from langchain.tools import Tool
10
  from langchain.agents import initialize_agent, AgentType
11
  from langchain_openai import ChatOpenAI
12
+ from langgraph.graph import StateGraph, END
13
 
14
+ import openai # Import openai to catch RateLimitError
15
  from tenacity import retry, wait_exponential, stop_after_attempt, retry_if_exception_type
 
16
 
17
  # ─── Load Environment Variables ──────────────────────────────────────────────
18
  load_dotenv()
 
75
  description="Useful for answering factual questions using a search engine.",
76
  )
77
 
78
+ # ─── Create LLM Agent ────────────────────────────────────────────────────────
79
  llm = ChatOpenAI(
80
  temperature=0.0,
81
+ model="gpt-4o",
82
  openai_api_key=OPENAI_API_KEY
83
  )
84
 
 
90
  handle_parsing_errors=True,
91
  )
92
 
 
 
 
 
 
 
 
 
93
  # ─── Clean Output ────────────────────────────────────────────────────────────
94
  def clean_answer(ans: str) -> str:
95
  if "```" in ans:
 
105
  question: str
106
  answer: str
107
 
108
+ # ─── Retry on Rate Limit Error ───────────────────────────────────────────────
109
+ @retry(
110
+ wait=wait_exponential(multiplier=1, min=1, max=10),
111
+ stop=stop_after_attempt(5),
112
+ retry=retry_if_exception_type(openai.error.RateLimitError)
113
+ )
114
+ def run_agent_with_retry(question: str) -> str:
115
+ return agent_executor.run(question)
116
+
117
+ # ─── Define Node Function ────────────────────────────────────────────────────
118
  def agent_node(state: GaiaState) -> GaiaState:
119
+ raw = run_agent_with_retry(state["question"])
 
 
 
120
  return {"question": state["question"], "answer": clean_answer(raw)}
121
 
122
  # ─── Build LangGraph ─────────────────────────────────────────────────────────
requirements.txt CHANGED
@@ -37,3 +37,4 @@ PyYAML
37
  hf-xet~=1.1.1
38
  langchain-openai
39
  tenacity
 
 
37
  hf-xet~=1.1.1
38
  langchain-openai
39
  tenacity
40
+ openai