Update agent.py
Browse files
Commit message: changed invoke() to invoke_with_retry()
agent.py
CHANGED
|
@@ -24,6 +24,46 @@ from pydantic import Field
|
|
| 24 |
|
| 25 |
from smolagents import WikipediaSearchTool
|
| 26 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
class SmolagentToolWrapper(BaseTool):
|
| 28 |
"""Wrapper for smolagents tools to make them compatible with LangChain."""
|
| 29 |
|
|
@@ -212,8 +252,9 @@ Please provide a detailed analysis focusing on:
|
|
| 212 |
4. Target audience"""
|
| 213 |
|
| 214 |
# Use the LLM with proper message format
|
| 215 |
-
messages = [HumanMessage(content=prompt)]
|
| 216 |
-
response = self.llm.invoke(messages)
|
|
|
|
| 217 |
return response.content if hasattr(response, 'content') else str(response)
|
| 218 |
|
| 219 |
except Exception as e:
|
|
@@ -241,8 +282,9 @@ Provide a detailed analysis including:
|
|
| 241 |
3. Notable findings
|
| 242 |
4. Any mathematical properties (if applicable)"""
|
| 243 |
|
| 244 |
-
messages = [HumanMessage(content=prompt)]
|
| 245 |
-
response = self.llm.invoke(messages)
|
|
|
|
| 246 |
return response.content if hasattr(response, 'content') else str(response)
|
| 247 |
|
| 248 |
except Exception as e:
|
|
@@ -262,8 +304,9 @@ Focus on:
|
|
| 262 |
3. Text or numbers (if present)
|
| 263 |
4. Overall context and meaning"""
|
| 264 |
|
| 265 |
-
messages = [HumanMessage(content=prompt)]
|
| 266 |
-
response = self.llm.invoke(messages)
|
|
|
|
| 267 |
return response.content if hasattr(response, 'content') else str(response)
|
| 268 |
|
| 269 |
except Exception as e:
|
|
|
|
| 24 |
|
| 25 |
from smolagents import WikipediaSearchTool
|
| 26 |
|
| 27 |
+
def invoke_with_retry(
    llm: ChatGoogleGenerativeAI,
    prompt: str,
    max_retries: int = 5,
    initial_delay: int = 60,
):
    """Call Google Generative AI, retrying when ``ResourceExhausted`` is raised.

    Each rate-limit hit waits ``delay`` seconds and then doubles the delay
    (exponential backoff). Any other exception aborts immediately.

    Args:
        llm: A ``ChatGoogleGenerativeAI`` instance.
        prompt: The user prompt string.
        max_retries: Maximum number of attempts. Defaults to 5.
        initial_delay: Seconds to wait before the first retry. Defaults to 60.

    Returns:
        The API response on success, or ``None`` if the retry budget was
        exhausted or an unexpected error occurred.
    """
    retries = 0
    delay = initial_delay

    while retries < max_retries:
        try:
            messages = [HumanMessage(content=prompt)]
            return llm.invoke(messages)
        except ResourceExhausted as e:
            # Rate limit hit: wait, double the delay, and try again.
            print(f"APIアクセス上限を超えました。待機して再試行します。({retries + 1}/{max_retries})")
            print(f"エラー詳細: {e}")
            time.sleep(delay)
            delay *= 2  # exponential backoff
            retries += 1
        except Exception as e:
            # Unexpected error: report it and stop. Return here instead of
            # `break` — the original fell through to the "max retries
            # reached" message below, which is misleading on this path.
            print(f"予期せぬエラーが発生しました: {e}")
            return None

    # Only reached when every attempt hit the rate limit.
    print("最大再試行回数に達しました。API呼び出しに失敗しました。")
    return None
|
| 66 |
+
|
| 67 |
class SmolagentToolWrapper(BaseTool):
|
| 68 |
"""Wrapper for smolagents tools to make them compatible with LangChain."""
|
| 69 |
|
|
|
|
| 252 |
4. Target audience"""
|
| 253 |
|
| 254 |
# Use the LLM with proper message format
|
| 255 |
+
#messages = [HumanMessage(content=prompt)]
|
| 256 |
+
#response = self.llm.invoke(messages)
|
| 257 |
+
response = invoke_with_retry(self.llm, prompt)
|
| 258 |
return response.content if hasattr(response, 'content') else str(response)
|
| 259 |
|
| 260 |
except Exception as e:
|
|
|
|
| 282 |
3. Notable findings
|
| 283 |
4. Any mathematical properties (if applicable)"""
|
| 284 |
|
| 285 |
+
#messages = [HumanMessage(content=prompt)]
|
| 286 |
+
#response = self.llm.invoke(messages)
|
| 287 |
+
response = invoke_with_retry(self.llm, prompt)
|
| 288 |
return response.content if hasattr(response, 'content') else str(response)
|
| 289 |
|
| 290 |
except Exception as e:
|
|
|
|
| 304 |
3. Text or numbers (if present)
|
| 305 |
4. Overall context and meaning"""
|
| 306 |
|
| 307 |
+
#messages = [HumanMessage(content=prompt)]
|
| 308 |
+
#response = self.llm.invoke(messages)
|
| 309 |
+
response = invoke_with_retry(self.llm, prompt)
|
| 310 |
return response.content if hasattr(response, 'content') else str(response)
|
| 311 |
|
| 312 |
except Exception as e:
|