Scott Cogan committed on
Commit
b82a7ed
·
1 Parent(s): 7153704
Files changed (1) hide show
  1. app.py +79 -13
app.py CHANGED
@@ -169,22 +169,29 @@ class BasicAgent:
169
 
170
  # System message
171
  self.sys_msg = SystemMessage('''You are a general AI assistant. I will ask you a question. Follow these steps:
 
172
  1. First, use the google_search tool to find relevant information about the question.
173
  2. Analyze the search results to find the specific information needed.
174
  3. If needed, use additional tools to gather more information.
175
  4. Only after gathering all necessary information, provide YOUR FINAL ANSWER.
176
-
177
- YOUR FINAL ANSWER should be:
178
- - A number (not written as a word) if asked for a quantity
179
- - As few words as possible if asked for a string
180
- - A comma separated list of numbers and/or strings if asked for a list
181
-
182
  Rules for formatting:
183
  - For numbers: Don't use commas or units ($, %, etc.) unless specified
184
  - For strings: Don't use articles or abbreviations
185
  - For lists: Apply the above rules based on whether each element is a number or string
186
-
187
- You MUST use tools to verify information before providing your final answer.''')
 
 
 
 
 
 
188
 
189
  # Create the graph
190
  self.workflow = StateGraph(AgentState)
@@ -215,7 +222,38 @@ class BasicAgent:
215
  for msg in messages:
216
  log_message(msg, " ")
217
 
218
- response = self.llm.invoke([self.sys_msg] + messages)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
219
 
220
  logger.info("\n=== Model Output ===")
221
  log_message(response, " ")
@@ -223,13 +261,41 @@ class BasicAgent:
223
  if not response or not response.content:
224
  logger.error("Empty response from model")
225
  raise ValueError("Empty response from model")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
226
 
227
- return {"messages": [response], "next": "tools"}
 
 
 
 
 
 
228
  except Exception as e:
229
  logger.error(f"Error in call_model: {str(e)}")
230
- if "429" in str(e):
231
- logger.warning("Rate limit hit, waiting before retry...")
232
- time.sleep(60) # Wait for 60 seconds before retry
 
 
 
 
 
233
  raise
234
 
235
  def call_tools(self, state: AgentState) -> AgentState:
 
169
 
170
  # System message
171
  self.sys_msg = SystemMessage('''You are a general AI assistant. I will ask you a question. Follow these steps:
172
+
173
  1. First, use the google_search tool to find relevant information about the question.
174
  2. Analyze the search results to find the specific information needed.
175
  3. If needed, use additional tools to gather more information.
176
  4. Only after gathering all necessary information, provide YOUR FINAL ANSWER.
177
+
178
+ YOUR FINAL ANSWER must be:
179
+ - For numbers: Just the digit (e.g., "7" not "seven" or "7 albums")
180
+ - For strings: As few words as possible
181
+ - For lists: A comma-separated list of numbers and/or strings
182
+
183
  Rules for formatting:
184
  - For numbers: Don't use commas or units ($, %, etc.) unless specified
185
  - For strings: Don't use articles or abbreviations
186
  - For lists: Apply the above rules based on whether each element is a number or string
187
+
188
+ IMPORTANT:
189
+ - You MUST use the google_search tool before providing your final answer
190
+ - Your final answer should ONLY be the requested information, no explanations
191
+ - If you need to search again, use the tool again
192
+ - Do not provide detailed analysis in your final answer
193
+ - If you encounter rate limits, inform the user that you need to search for information
194
+ - Never make up information - if you can't find it, say so''')
195
 
196
  # Create the graph
197
  self.workflow = StateGraph(AgentState)
 
222
  for msg in messages:
223
  log_message(msg, " ")
224
 
225
+ # Add tools to the model invocation
226
+ try:
227
+ response = self.llm.invoke(
228
+ [self.sys_msg] + messages,
229
+ tools=[{"type": "function", "function": {
230
+ "name": "google_search",
231
+ "description": "Search for information on the web",
232
+ "parameters": {
233
+ "type": "object",
234
+ "properties": {
235
+ "query": {
236
+ "type": "string",
237
+ "description": "The search query"
238
+ }
239
+ },
240
+ "required": ["query"]
241
+ }
242
+ }}]
243
+ )
244
+ except Exception as e:
245
+ error_str = str(e)
246
+ if "429" in error_str:
247
+ if "GenerateRequestsPerDayPerProjectPerModel-FreeTier" in error_str:
248
+ logger.warning("Daily quota limit reached, providing fallback response")
249
+ # For daily quota limits, provide a fallback response
250
+ return {"messages": [AIMessage(content="I've reached my daily limit for processing requests. Please try again tomorrow or contact support for assistance.")], "next": END}
251
+ else:
252
+ logger.warning("Rate limit hit, waiting before retry...")
253
+ time.sleep(60) # Wait for 60 seconds before retry
254
+ raise # Re-raise to trigger retry
255
+ else:
256
+ raise
257
 
258
  logger.info("\n=== Model Output ===")
259
  log_message(response, " ")
 
261
  if not response or not response.content:
262
  logger.error("Empty response from model")
263
  raise ValueError("Empty response from model")
264
+
265
+ # Check if the response contains a tool call
266
+ if hasattr(response, 'tool_calls') and response.tool_calls:
267
+ return {"messages": [response], "next": "tools"}
268
+ else:
269
+ # If no tool call, check if it's a final answer
270
+ content = response.content.strip()
271
+
272
+ # Clean up the content to ensure it's in the correct format
273
+ if content.startswith("**Final Answer**: "):
274
+ content = content.replace("**Final Answer**: ", "").strip()
275
+
276
+ # For numbers, ensure they're in the correct format
277
+ if content.replace(".", "").isdigit():
278
+ # Remove any decimal places for whole numbers
279
+ if float(content).is_integer():
280
+ content = str(int(float(content)))
281
 
282
+ # Check if the content is a valid final answer
283
+ if content.isdigit() or (content.startswith('[') and content.endswith(']')):
284
+ return {"messages": [AIMessage(content=content)], "next": END}
285
+ else:
286
+ # If not a final answer, continue the conversation
287
+ return {"messages": [response], "next": "agent"}
288
+
289
  except Exception as e:
290
  logger.error(f"Error in call_model: {str(e)}")
291
+ error_str = str(e)
292
+ if "429" in error_str:
293
+ if "GenerateRequestsPerDayPerProjectPerModel-FreeTier" in error_str:
294
+ logger.warning("Daily quota limit reached, providing fallback response")
295
+ return {"messages": [AIMessage(content="I've reached my daily limit for processing requests. Please try again tomorrow or contact support for assistance.")], "next": END}
296
+ else:
297
+ logger.warning("Rate limit hit, waiting before retry...")
298
+ time.sleep(60) # Wait for 60 seconds before retry
299
  raise
300
 
301
  def call_tools(self, state: AgentState) -> AgentState: