Update model to gpt-4.1-mini for higher rate limit
Update model to gpt-4.1-mini for higher tokens per minute (TPM) rate limit.
agents.py
CHANGED
|
@@ -32,7 +32,7 @@ def create_general_ai_agent(verbosity: int = LogLevel.INFO):
|
|
| 32 |
env_tools = [
|
| 33 |
get_attachment_tool,
|
| 34 |
]
|
| 35 |
-    model = OpenAIServerModel(model_id='gpt-4.1')
|
| 36 |
console = Console(record=True)
|
| 37 |
logger = AgentLogger(level=verbosity, console=console)
|
| 38 |
steps_buffer = []
|
|
@@ -122,7 +122,7 @@ def create_general_ai_agent(verbosity: int = LogLevel.INFO):
|
|
| 122 |
raw_answer: Optional[str | None] = None
|
| 123 |
final_answer: Optional[str | None] = None
|
| 124 |
|
| 125 |
-    llm = ChatOpenAI(model='gpt-4.1')
|
| 126 |
logger=AgentLogger(level=verbosity)
|
| 127 |
|
| 128 |
@backoff.on_exception(backoff.expo, openai.RateLimitError, max_time=60, max_tries=6)
|
|
|
|
| 32 |
env_tools = [
|
| 33 |
get_attachment_tool,
|
| 34 |
]
|
| 35 |
+    model = OpenAIServerModel(model_id='gpt-4.1-mini')
|
| 36 |
console = Console(record=True)
|
| 37 |
logger = AgentLogger(level=verbosity, console=console)
|
| 38 |
steps_buffer = []
|
|
|
|
| 122 |
raw_answer: Optional[str | None] = None
|
| 123 |
final_answer: Optional[str | None] = None
|
| 124 |
|
| 125 |
+    llm = ChatOpenAI(model='gpt-4.1-mini')
|
| 126 |
logger=AgentLogger(level=verbosity)
|
| 127 |
|
| 128 |
@backoff.on_exception(backoff.expo, openai.RateLimitError, max_time=60, max_tries=6)
|