Spaces:
Running
Running
Update agents.py
Browse files
agents.py
CHANGED
|
@@ -133,6 +133,7 @@ class LLMAgentBase(Player):
|
|
| 133 |
async def choose_move(self, battle: Battle) -> str:
|
| 134 |
battle_state_str = self._format_battle_state(battle)
|
| 135 |
decision_result = await self._get_llm_decision(battle_state_str)
|
|
|
|
| 136 |
decision = decision_result.get("decision")
|
| 137 |
error_message = decision_result.get("error")
|
| 138 |
action_taken = False
|
|
@@ -235,7 +236,7 @@ class GeminiAgent(LLMAgentBase):
|
|
| 235 |
prompt,
|
| 236 |
generation_config={"temperature": 0.5}
|
| 237 |
)
|
| 238 |
-
|
| 239 |
if not response.candidates:
|
| 240 |
finish_reason_str = "No candidates found"
|
| 241 |
try:
|
|
@@ -317,7 +318,7 @@ class OpenAIAgent(LLMAgentBase):
|
|
| 317 |
temperature=0.5,
|
| 318 |
)
|
| 319 |
message = response.choices[0].message
|
| 320 |
-
|
| 321 |
# Check for tool calls in the response
|
| 322 |
if message.tool_calls:
|
| 323 |
tool_call = message.tool_calls[0] # Get the first tool call
|
|
@@ -377,7 +378,7 @@ class MistralAgent(LLMAgentBase):
|
|
| 377 |
tool_choice="auto", # Let the model choose
|
| 378 |
temperature=0.5,
|
| 379 |
)
|
| 380 |
-
|
| 381 |
message = response.choices[0].message
|
| 382 |
# Check for tool calls in the response
|
| 383 |
if message.tool_calls:
|
|
|
|
| 133 |
async def choose_move(self, battle: Battle) -> str:
|
| 134 |
battle_state_str = self._format_battle_state(battle)
|
| 135 |
decision_result = await self._get_llm_decision(battle_state_str)
|
| 136 |
+
print(decision_result)
|
| 137 |
decision = decision_result.get("decision")
|
| 138 |
error_message = decision_result.get("error")
|
| 139 |
action_taken = False
|
|
|
|
| 236 |
prompt,
|
| 237 |
generation_config={"temperature": 0.5}
|
| 238 |
)
|
| 239 |
+
print("GEMINI RESPONSE : ", response)
|
| 240 |
if not response.candidates:
|
| 241 |
finish_reason_str = "No candidates found"
|
| 242 |
try:
|
|
|
|
| 318 |
temperature=0.5,
|
| 319 |
)
|
| 320 |
message = response.choices[0].message
|
| 321 |
+
print("OPENAI RESPONSE : ", response)
|
| 322 |
# Check for tool calls in the response
|
| 323 |
if message.tool_calls:
|
| 324 |
tool_call = message.tool_calls[0] # Get the first tool call
|
|
|
|
| 378 |
tool_choice="auto", # Let the model choose
|
| 379 |
temperature=0.5,
|
| 380 |
)
|
| 381 |
+
print("Mistral RESPONSE : ", response)
|
| 382 |
message = response.choices[0].message
|
| 383 |
# Check for tool calls in the response
|
| 384 |
if message.tool_calls:
|