Scott Cogan
committed on
Commit
·
30541b5
1
Parent(s):
72ec790
latest requirements
Browse files- app.py +81 -31
- requirements.txt +2 -1
app.py
CHANGED
|
@@ -16,6 +16,8 @@ import operator
|
|
| 16 |
from langgraph.prebuilt import ToolExecutor
|
| 17 |
from langchain_core.tools import tool
|
| 18 |
from utilities import get_file
|
|
|
|
|
|
|
| 19 |
|
| 20 |
# Constants
|
| 21 |
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
|
|
@@ -144,45 +146,93 @@ class BasicAgent:
|
|
| 144 |
|
| 145 |
print("BasicAgent initialized.")
|
| 146 |
|
|
|
|
| 147 |
def call_model(self, state: AgentState) -> AgentState:
|
| 148 |
-
"""Call the model to generate a response."""
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 152 |
|
| 153 |
def call_tools(self, state: AgentState) -> AgentState:
|
| 154 |
"""Call the tools based on the model's response."""
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 169 |
|
| 170 |
async def __call__(self, question: str, task_id: str) -> str:
|
| 171 |
-
"""Process a question and return the answer."""
|
| 172 |
print(f"Agent received question (first 50 chars): {question[:50]}...")
|
| 173 |
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 186 |
|
| 187 |
def run_and_submit_all(profile):
|
| 188 |
"""
|
|
|
|
| 16 |
from langgraph.prebuilt import ToolExecutor
|
| 17 |
from langchain_core.tools import tool
|
| 18 |
from utilities import get_file
|
| 19 |
+
import time
|
| 20 |
+
from tenacity import retry, stop_after_attempt, wait_exponential
|
| 21 |
|
| 22 |
# Constants
|
| 23 |
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
|
|
|
|
| 146 |
|
| 147 |
print("BasicAgent initialized.")
|
| 148 |
|
| 149 |
+
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=60))
def call_model(self, state: AgentState) -> AgentState:
    """Invoke the LLM on the conversation so far and route to the tools node.

    An empty model response is deliberately raised as an error so the
    tenacity decorator re-runs the call (up to 3 attempts, exponential
    back-off between 4 and 60 seconds).
    """
    try:
        history = state["messages"]
        reply = self.llm.invoke([self.sys_msg] + history)

        # Success path: a non-empty reply goes straight to the tools node.
        if reply and reply.content:
            return {"messages": [reply], "next": "tools"}

        print("Empty response from model, retrying...")
        raise ValueError("Empty response from model")
    except Exception as err:
        print(f"Error in call_model: {str(err)}")
        if "429" in str(err):
            # NOTE(review): this extra sleep stacks on top of tenacity's own
            # back-off wait — presumably an intentional cool-down for rate
            # limits; confirm it is wanted twice.
            print("Rate limit hit, waiting before retry...")
            time.sleep(60)  # Wait for 60 seconds before retry
        raise
|
| 167 |
|
| 168 |
def call_tools(self, state: AgentState) -> AgentState:
    """Execute any tool calls requested by the model's last message.

    Each tool result (or tool error) is appended to the message list as an
    AIMessage, and control is routed back to the agent node. This node never
    raises: failures are reported in-band so the graph keeps running.
    """
    # Bind before the try: the original bound `messages` inside the try, so a
    # failing state lookup made the outer handler crash with UnboundLocalError,
    # masking the real error.
    messages = state.get("messages", [])
    try:
        last_message = messages[-1]

        if isinstance(last_message, AIMessage):
            # Extract tool calls from the message
            tool_calls = last_message.tool_calls
            if tool_calls:
                for tool_call in tool_calls:
                    # Resolve the name before the inner try: the original
                    # assigned it inside, so the except handler could hit a
                    # NameError if `tool_call.name` itself raised.
                    tool_name = tool_call.name
                    try:
                        tool_args = tool_call.args
                        result = self.tool_executor.invoke(tool_name, tool_args)
                        messages.append(AIMessage(content=f"Tool result: {result}"))
                    except Exception as e:
                        print(f"Error executing tool {tool_name}: {str(e)}")
                        messages.append(AIMessage(content=f"Tool error: {str(e)}"))

        return {"messages": messages, "next": "agent"}
    except Exception as e:
        # e.g. empty message list, or a tool_call without a `.name` attribute.
        print(f"Error in call_tools: {str(e)}")
        return {"messages": messages, "next": "agent"}
|
| 192 |
|
| 193 |
async def __call__(self, question: str, task_id: str) -> str:
    """Process a question and return the answer with error handling.

    Runs the compiled graph up to 3 times; 429-style rate-limit failures get
    an escalating 60s/120s/180s pause, every other failure a flat 5s pause.
    Always returns a string — errors are folded into the return value rather
    than raised to the caller.
    """
    print(f"Agent received question (first 50 chars): {question[:50]}...")

    try:
        # Seed the graph with the task id and question as one human turn.
        initial_state = {
            "messages": [HumanMessage(content=f'Task id: {task_id}\n {question}')],
            "next": "agent"
        }

        max_retries = 3
        last_error = None

        for attempt in range(1, max_retries + 1):
            try:
                # NOTE(review): self.app.invoke is synchronous — it blocks the
                # event loop while the graph runs; confirm that is acceptable.
                outcome = self.app.invoke(initial_state)
                final = outcome["messages"][-1]

                if isinstance(final, AIMessage) and final.content:
                    return final.content
                # An empty/invalid reply counts as a failure and is retried.
                raise ValueError("Empty or invalid response")

            except Exception as e:
                last_error = e
                if "429" in str(e):
                    wait_time = 60 * attempt
                    print(f"Rate limit hit, waiting {wait_time} seconds before retry {attempt}/{max_retries}")
                    await asyncio.sleep(wait_time)
                else:
                    print(f"Error in processing, retry {attempt}/{max_retries}: {str(e)}")
                    await asyncio.sleep(5)

        print(f"All retries failed. Last error: {str(last_error)}")
        return "Unable to generate answer after multiple attempts"

    except Exception as e:
        print(f"Fatal error in agent: {str(e)}")
        return f"Error: {str(e)}"
|
| 236 |
|
| 237 |
def run_and_submit_all(profile):
|
| 238 |
"""
|
requirements.txt
CHANGED
|
@@ -11,4 +11,5 @@ pytube
|
|
| 11 |
google
|
| 12 |
google-generativeai>=0.3.1
|
| 13 |
duckduckgo-search
|
| 14 |
-
pandas
|
|
|
|
|
|
| 11 |
google
|
| 12 |
google-generativeai>=0.3.1
|
| 13 |
duckduckgo-search
|
| 14 |
+
pandas
|
| 15 |
+
tenacity>=8.0.1
|