Spaces:
Running
Running
Adding a loop in case rate limits are reached or exceeded in the initial run
#2
by
Kingstonsmj
- opened
paper.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
| 1 |
import os
|
| 2 |
import json
|
| 3 |
import logging
|
|
|
|
| 4 |
from datetime import datetime, timedelta
|
| 5 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 6 |
from langchain.schema import SystemMessage, HumanMessage
|
|
@@ -32,7 +33,7 @@ class Task:
|
|
| 32 |
google_api_key = os.getenv("GEMINI_API_KEY") # 실제 Google API 키 사용
|
| 33 |
if not google_api_key:
|
| 34 |
logging.error("GEMINI_API_KEY is not set in the environment variables.")
|
| 35 |
-
llm = ChatGoogleGenerativeAI(model="gemini-2.
|
| 36 |
|
| 37 |
# -------------------------------------------------------------------------------
|
| 38 |
# Define Academic Research Agents
|
|
@@ -169,6 +170,7 @@ def run_task(task: Task, input_text: str) -> str:
|
|
| 169 |
Executes the given task using the associated agent's LLM and returns the response content.
|
| 170 |
"""
|
| 171 |
try:
|
|
|
|
| 172 |
if not isinstance(task, Task):
|
| 173 |
raise ValueError(f"Expected 'task' to be an instance of Task, got {type(task)}")
|
| 174 |
if not hasattr(task, 'agent') or not isinstance(task.agent, Agent):
|
|
@@ -190,10 +192,26 @@ def run_task(task: Task, input_text: str) -> str:
|
|
| 190 |
SystemMessage(content=system_input),
|
| 191 |
HumanMessage(content=task_input)
|
| 192 |
]
|
|
|
|
|
|
|
|
|
|
| 193 |
response = task.agent.llm.invoke(messages)
|
|
|
|
| 194 |
if not response or not response.content:
|
| 195 |
raise ValueError("Empty response from LLM.")
|
| 196 |
return response.content
|
| 197 |
except Exception as e:
|
| 198 |
logging.error(f"Error in task '{task.agent.role}': {e}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 199 |
return f"Error in {task.agent.role}: {e}"
|
|
|
|
| 1 |
import os
|
| 2 |
import json
|
| 3 |
import logging
|
| 4 |
+
import time
|
| 5 |
from datetime import datetime, timedelta
|
| 6 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 7 |
from langchain.schema import SystemMessage, HumanMessage
|
|
|
|
| 33 |
google_api_key = os.getenv("GEMINI_API_KEY") # 실제 Google API 키 사용
|
| 34 |
if not google_api_key:
|
| 35 |
logging.error("GEMINI_API_KEY is not set in the environment variables.")
|
| 36 |
+
llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-lite", google_api_key=google_api_key)
|
| 37 |
|
| 38 |
# -------------------------------------------------------------------------------
|
| 39 |
# Define Academic Research Agents
|
|
|
|
| 170 |
Executes the given task using the associated agent's LLM and returns the response content.
|
| 171 |
"""
|
| 172 |
try:
|
| 173 |
+
|
| 174 |
if not isinstance(task, Task):
|
| 175 |
raise ValueError(f"Expected 'task' to be an instance of Task, got {type(task)}")
|
| 176 |
if not hasattr(task, 'agent') or not isinstance(task.agent, Agent):
|
|
|
|
| 192 |
SystemMessage(content=system_input),
|
| 193 |
HumanMessage(content=task_input)
|
| 194 |
]
|
| 195 |
+
|
| 196 |
+
time.sleep(5) # delay in between requests to avoid reaching max quota and program terminating
|
| 197 |
+
|
| 198 |
response = task.agent.llm.invoke(messages)
|
| 199 |
+
|
| 200 |
if not response or not response.content:
|
| 201 |
raise ValueError("Empty response from LLM.")
|
| 202 |
return response.content
|
| 203 |
except Exception as e:
|
| 204 |
logging.error(f"Error in task '{task.agent.role}': {e}")
|
| 205 |
+
|
| 206 |
+
if "429" in str(e) or "quota" in str(e).lower():
|
| 207 |
+
logging.info("Rate limit hit, waiting 10 seconds and retrying...")
|
| 208 |
+
time.sleep(10)
|
| 209 |
+
try:
|
| 210 |
+
response = task.agent.llm.invoke(messages)
|
| 211 |
+
if not response or not response.content:
|
| 212 |
+
raise ValueError("Empty response from LLM.")
|
| 213 |
+
return response.content
|
| 214 |
+
except Exception as retry_error:
|
| 215 |
+
logging.error(f"Retry failed: {retry_error}")
|
| 216 |
+
return f"Error: Rate limit exceeded. Please wait a few minutes and try again."
|
| 217 |
return f"Error in {task.agent.role}: {e}"
|