Spaces:
Running
Running
Pulastya B
committed on
Commit
·
ebbb065
1
Parent(s):
d812573
fix: Configure Gemini tools at model level, not send_message level
Browse files
- Create GenerativeModel with tools parameter in constructor
- Remove tools parameter from all send_message() calls
- Gemini API requires tools configured on model, not per-message
- This is the correct pattern for google-generativeai SDK
- Fixes: TypeError multiple values for keyword argument 'tools'
- src/orchestrator.py +19 -14
src/orchestrator.py
CHANGED
|
@@ -1314,12 +1314,22 @@ You are a DOER. Complete workflows based on user intent."""
|
|
| 1314 |
# Prepare tools once
|
| 1315 |
tools_to_use = self._compress_tools_registry()
|
| 1316 |
|
| 1317 |
-
# For Gemini,
|
| 1318 |
gemini_chat = None
|
| 1319 |
-
gemini_tools = None
|
| 1320 |
if self.provider == "gemini":
|
|
|
|
| 1321 |
gemini_tools = self._convert_to_gemini_tools(tools_to_use)
|
| 1322 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1323 |
|
| 1324 |
while iteration < max_iterations:
|
| 1325 |
iteration += 1
|
|
@@ -1358,20 +1368,16 @@ You are a DOER. Complete workflows based on user intent."""
|
|
| 1358 |
final_content = response_message.content
|
| 1359 |
|
| 1360 |
elif self.provider == "gemini":
|
| 1361 |
-
#
|
| 1362 |
if iteration == 1:
|
|
|
|
| 1363 |
combined_message = f"{messages[0]['content']}\n\n{messages[1]['content']}"
|
| 1364 |
-
|
| 1365 |
-
response = gemini_chat.send_message(
|
| 1366 |
-
combined_message,
|
| 1367 |
-
tools=gemini_tools
|
| 1368 |
-
)
|
| 1369 |
else:
|
| 1370 |
-
# Subsequent iterations: send function responses
|
| 1371 |
-
# The chat session already has tools configured from the first call
|
| 1372 |
last_tool_msg = messages[-1]
|
| 1373 |
if last_tool_msg.get("role") == "tool":
|
| 1374 |
-
# Send function response back to Gemini
|
| 1375 |
from google.ai.generativelanguage_v1beta.types import content as glm_content
|
| 1376 |
|
| 1377 |
function_response_part = glm_content.Part(
|
|
@@ -1381,10 +1387,9 @@ You are a DOER. Complete workflows based on user intent."""
|
|
| 1381 |
)
|
| 1382 |
)
|
| 1383 |
|
| 1384 |
-
# Don't pass tools again - already configured in chat session
|
| 1385 |
response = gemini_chat.send_message(function_response_part)
|
| 1386 |
else:
|
| 1387 |
-
#
|
| 1388 |
response = gemini_chat.send_message("Continue with the next step.")
|
| 1389 |
|
| 1390 |
self.api_calls_made += 1
|
|
|
|
| 1314 |
# Prepare tools once
|
| 1315 |
tools_to_use = self._compress_tools_registry()
|
| 1316 |
|
| 1317 |
+
# For Gemini, create a model with tools configured and start chat session
|
| 1318 |
gemini_chat = None
|
|
|
|
| 1319 |
if self.provider == "gemini":
|
| 1320 |
+
# Convert tools to Gemini format
|
| 1321 |
gemini_tools = self._convert_to_gemini_tools(tools_to_use)
|
| 1322 |
+
|
| 1323 |
+
# Create a NEW model instance with tools configured
|
| 1324 |
+
# This is the correct way for Gemini API - tools are part of model config
|
| 1325 |
+
gemini_model_with_tools = genai.GenerativeModel(
|
| 1326 |
+
self.model,
|
| 1327 |
+
generation_config={"temperature": 0.1},
|
| 1328 |
+
tools=gemini_tools
|
| 1329 |
+
)
|
| 1330 |
+
|
| 1331 |
+
# Start chat with the tool-configured model
|
| 1332 |
+
gemini_chat = gemini_model_with_tools.start_chat(history=[], enable_automatic_function_calling=False)
|
| 1333 |
|
| 1334 |
while iteration < max_iterations:
|
| 1335 |
iteration += 1
|
|
|
|
| 1368 |
final_content = response_message.content
|
| 1369 |
|
| 1370 |
elif self.provider == "gemini":
|
| 1371 |
+
# Send messages WITHOUT tools parameter (tools already configured on model)
|
| 1372 |
if iteration == 1:
|
| 1373 |
+
# First iteration: send system + user message
|
| 1374 |
combined_message = f"{messages[0]['content']}\n\n{messages[1]['content']}"
|
| 1375 |
+
response = gemini_chat.send_message(combined_message)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1376 |
else:
|
| 1377 |
+
# Subsequent iterations: send function responses
|
|
|
|
| 1378 |
last_tool_msg = messages[-1]
|
| 1379 |
if last_tool_msg.get("role") == "tool":
|
| 1380 |
+
# Send function response back to Gemini
|
| 1381 |
from google.ai.generativelanguage_v1beta.types import content as glm_content
|
| 1382 |
|
| 1383 |
function_response_part = glm_content.Part(
|
|
|
|
| 1387 |
)
|
| 1388 |
)
|
| 1389 |
|
|
|
|
| 1390 |
response = gemini_chat.send_message(function_response_part)
|
| 1391 |
else:
|
| 1392 |
+
# Fallback
|
| 1393 |
response = gemini_chat.send_message("Continue with the next step.")
|
| 1394 |
|
| 1395 |
self.api_calls_made += 1
|