Scott Cogan committed on
Commit
e9577aa
·
1 Parent(s): 3f8c358
Files changed (2) hide show
  1. app.py +75 -13
  2. requirements.txt +12 -15
app.py CHANGED
@@ -203,12 +203,23 @@ def log_message(message: BaseMessage, prefix: str = ""):
203
 
204
  class BasicAgent:
205
  def __init__(self):
206
- self.llm = ChatGoogleGenerativeAI(
 
207
  model="gemini-2.5-flash-preview-05-20",
208
  max_tokens=8192,
209
  temperature=0
210
  )
211
 
 
 
 
 
 
 
 
 
 
 
212
  # Create tool executor
213
  self.tools = [
214
  get_file, analyse_excel, add_numbers, transcribe_audio,
@@ -262,11 +273,11 @@ class BasicAgent:
262
  # Set recursion limit through environment variable
263
  os.environ["LANGRAPH_RECURSION_LIMIT"] = "50"
264
 
265
- logger.info("BasicAgent initialized.")
266
 
267
  @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=60))
268
  def call_model(self, state: AgentState) -> AgentState:
269
- """Call the model to generate a response with retry logic."""
270
  try:
271
  messages = state["messages"]
272
  logger.info("\n=== Model Input ===")
@@ -274,9 +285,9 @@ class BasicAgent:
274
  for msg in messages:
275
  log_message(msg, " ")
276
 
277
- # Add tools to the model invocation
278
  try:
279
- response = self.llm.invoke(
280
  [self.sys_msg] + messages,
281
  tools=[{"type": "function", "function": {
282
  "name": "google_search",
@@ -297,12 +308,39 @@ class BasicAgent:
297
  error_str = str(e)
298
  if "429" in error_str:
299
  if "GenerateRequestsPerDayPerProjectPerModel-FreeTier" in error_str:
300
- logger.warning("Daily quota limit reached, providing fallback response")
301
- # For daily quota limits, provide a fallback response and end the conversation
302
- return {
303
- "messages": [AIMessage(content="I've reached my daily limit for processing requests. Please try again tomorrow or contact support for assistance.")],
304
- "next": END
305
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
306
  else:
307
  # For other rate limits, wait and retry
308
  wait_time = 60
@@ -348,8 +386,32 @@ class BasicAgent:
348
  error_str = str(e)
349
  if "429" in error_str:
350
  if "GenerateRequestsPerDayPerProjectPerModel-FreeTier" in error_str:
351
- logger.warning("Daily quota limit reached, providing fallback response")
352
- return {"messages": [AIMessage(content="I've reached my daily limit for processing requests. Please try again tomorrow or contact support for assistance.")], "next": END}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
353
  else:
354
  logger.warning("Rate limit hit, waiting before retry...")
355
  time.sleep(60) # Wait for 60 seconds before retry
 
203
 
204
  class BasicAgent:
205
  def __init__(self):
206
+ # Initialize primary LLM (Gemini)
207
+ self.primary_llm = ChatGoogleGenerativeAI(
208
  model="gemini-2.5-flash-preview-05-20",
209
  max_tokens=8192,
210
  temperature=0
211
  )
212
 
213
+ # Initialize fallback LLM (if available)
214
+ self.fallback_llm = None
215
+ if os.getenv("OPENAI_API_KEY"):
216
+ from langchain_openai import ChatOpenAI
217
+ self.fallback_llm = ChatOpenAI(
218
+ model="gpt-3.5-turbo",
219
+ temperature=0,
220
+ max_tokens=4096
221
+ )
222
+
223
  # Create tool executor
224
  self.tools = [
225
  get_file, analyse_excel, add_numbers, transcribe_audio,
 
273
  # Set recursion limit through environment variable
274
  os.environ["LANGRAPH_RECURSION_LIMIT"] = "50"
275
 
276
+ logger.info("BasicAgent initialized with fallback LLM support.")
277
 
278
  @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=60))
279
  def call_model(self, state: AgentState) -> AgentState:
280
+ """Call the model to generate a response with retry logic and fallback support."""
281
  try:
282
  messages = state["messages"]
283
  logger.info("\n=== Model Input ===")
 
285
  for msg in messages:
286
  log_message(msg, " ")
287
 
288
+ # Try primary LLM first
289
  try:
290
+ response = self.primary_llm.invoke(
291
  [self.sys_msg] + messages,
292
  tools=[{"type": "function", "function": {
293
  "name": "google_search",
 
308
  error_str = str(e)
309
  if "429" in error_str:
310
  if "GenerateRequestsPerDayPerProjectPerModel-FreeTier" in error_str:
311
+ logger.warning("Daily quota limit reached for primary LLM, trying fallback")
312
+ if self.fallback_llm:
313
+ try:
314
+ response = self.fallback_llm.invoke(
315
+ [self.sys_msg] + messages,
316
+ tools=[{"type": "function", "function": {
317
+ "name": "google_search",
318
+ "description": "Search for information on the web",
319
+ "parameters": {
320
+ "type": "object",
321
+ "properties": {
322
+ "query": {
323
+ "type": "string",
324
+ "description": "The search query"
325
+ }
326
+ },
327
+ "required": ["query"]
328
+ }
329
+ }}]
330
+ )
331
+ logger.info("Successfully used fallback LLM")
332
+ except Exception as fallback_error:
333
+ logger.error(f"Fallback LLM also failed: {str(fallback_error)}")
334
+ return {
335
+ "messages": [AIMessage(content="All LLM services are currently unavailable. Please try again later.")],
336
+ "next": END
337
+ }
338
+ else:
339
+ logger.warning("No fallback LLM available")
340
+ return {
341
+ "messages": [AIMessage(content="I've reached my daily limit for processing requests. Please try again tomorrow or contact support for assistance.")],
342
+ "next": END
343
+ }
344
  else:
345
  # For other rate limits, wait and retry
346
  wait_time = 60
 
386
  error_str = str(e)
387
  if "429" in error_str:
388
  if "GenerateRequestsPerDayPerProjectPerModel-FreeTier" in error_str:
389
+ logger.warning("Daily quota limit reached, trying fallback")
390
+ if self.fallback_llm:
391
+ try:
392
+ response = self.fallback_llm.invoke(
393
+ [self.sys_msg] + messages,
394
+ tools=[{"type": "function", "function": {
395
+ "name": "google_search",
396
+ "description": "Search for information on the web",
397
+ "parameters": {
398
+ "type": "object",
399
+ "properties": {
400
+ "query": {
401
+ "type": "string",
402
+ "description": "The search query"
403
+ }
404
+ },
405
+ "required": ["query"]
406
+ }
407
+ }}]
408
+ )
409
+ return {"messages": [response], "next": "tools"}
410
+ except Exception as fallback_error:
411
+ logger.error(f"Fallback LLM also failed: {str(fallback_error)}")
412
+ return {"messages": [AIMessage(content="All LLM services are currently unavailable. Please try again later.")], "next": END}
413
+ else:
414
+ return {"messages": [AIMessage(content="I've reached my daily limit for processing requests. Please try again tomorrow or contact support for assistance.")], "next": END}
415
  else:
416
  logger.warning("Rate limit hit, waiting before retry...")
417
  time.sleep(60) # Wait for 60 seconds before retry
requirements.txt CHANGED
@@ -1,15 +1,12 @@
1
- langchain>=0.3,<0.4
2
- langchain-core>=0.3,<0.4
3
- langchain-community>=0.3,<0.4
4
- langchain-experimental>=0.3,<0.4
5
- langchain-google-genai>=2,<3
6
- langgraph==0.2.20
7
- gradio>=4.44.1
8
- requests
9
- openpyxl
10
- pytube
11
- google
12
- google-generativeai>=0.3.1
13
- duckduckgo-search
14
- pandas
15
- tenacity>=8.0.1
 
1
+ langchain==0.1.12
2
+ langchain-google-genai==0.0.11
3
+ langchain-core==0.1.32
4
+ langgraph==0.0.27
5
+ gradio==4.19.2
6
+ pandas==2.2.1
7
+ openpyxl==3.1.2
8
+ tenacity==8.2.3
9
+ google-generativeai==0.3.2
10
+ langchain-openai==0.0.8
11
+ python-dotenv==1.0.1
12
+ requests==2.31.0