adityaverma977 committed on
Commit
258a316
·
1 Parent(s): cb64216

Improve agent chat tone and HF fallback behavior

Browse files
Files changed (2) hide show
  1. app/groq_client.py +108 -31
  2. backend/app/groq_client.py +108 -31
app/groq_client.py CHANGED
@@ -2,6 +2,7 @@ import json
2
  import math
3
  import os
4
  import random
 
5
 
6
  import httpx
7
  from dotenv import load_dotenv
@@ -12,6 +13,7 @@ load_dotenv()
12
 
13
  _HF_API_TOKEN = (os.environ.get("HF_API_TOKEN") or os.environ.get("HUGGINGFACE_API_TOKEN") or "").strip()
14
  _HF_CHAT_URL = "https://router.huggingface.co/v1/chat/completions"
 
15
 
16
  MAX_AGENT_SPEED = 80
17
 
@@ -33,41 +35,54 @@ def _headers() -> dict[str, str]:
33
  }
34
 
35
 
36
- def _generate_chat_message(action: str, agent_name: str, fire_distance: float, has_water: bool) -> str:
 
 
 
 
 
 
 
 
37
  action_messages = {
38
  "search_water": [
39
- f"{agent_name} is hunting for water...",
40
- f"{agent_name} is tracking the nearest well.",
41
- "Need water before this gets worse.",
42
- "Scanning for the fastest water route.",
 
43
  ],
44
  "collect_water": [
45
- f"{agent_name} is filling up now.",
46
- "Got the well, taking water.",
47
- "Water secured, moving out.",
48
- "That should be enough to fight back.",
 
49
  ],
50
  "extinguish_fire": [
51
- f"{agent_name} is pushing the fire line.",
52
- "Closing in with water.",
53
- "Time to hit the flames.",
54
- "Pressure on the fire now.",
 
55
  ],
56
  "escape": [
57
- f"{agent_name} is backing out.",
58
- "Too hot here, pulling away.",
59
- "Need space before the fire closes in.",
60
- "Resetting position and staying alive.",
 
61
  ],
62
  "vote_for_leader": [
63
- f"{agent_name} wants a leader in place.",
64
- "Coordination first, then pressure.",
65
- "Picking a lead so we stop wasting ticks.",
66
  "We need one caller right now.",
 
 
67
  ],
68
  }
69
  messages = action_messages.get(action, action_messages["escape"])
70
- return random.choice(messages)
71
 
72
 
73
  def _build_fire_state_summary(agent, fire, all_agents) -> str:
@@ -129,14 +144,38 @@ def _extract_json_object(text: str) -> dict:
129
  return parsed if isinstance(parsed, dict) else {}
130
 
131
 
132
- def _normalize_decision(decision: dict, agent_name: str, dist_to_fire: float, has_water: bool) -> dict:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
133
  action = decision.get("action", "escape")
134
  if action not in {"search_water", "collect_water", "extinguish_fire", "escape", "vote_for_leader"}:
135
  action = "escape"
136
 
137
  message = " ".join(str(decision.get("message", "")).strip().split())
138
- if not message:
139
- message = _generate_chat_message(action, agent_name, dist_to_fire, has_water)
 
 
140
 
141
  vote_for = decision.get("vote_for")
142
  if vote_for is not None and not isinstance(vote_for, str):
@@ -154,6 +193,14 @@ def _normalize_decision(decision: dict, agent_name: str, dist_to_fire: float, ha
154
  }
155
 
156
 
 
 
 
 
 
 
 
 
157
  async def _request_model_response(target_model: str, prompt: str) -> str:
158
  payload = {
159
  "model": target_model,
@@ -169,10 +216,30 @@ async def _request_model_response(target_model: str, prompt: str) -> str:
169
  return _extract_message_content(data)
170
 
171
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172
  async def generate_fire_decision(agent, fire, water_sources, other_agents, bounds, recent_radio=None) -> dict:
173
  if not is_ready():
174
  print(f"[INFERENCE_FAIL] {agent.model_name}: HF token not ready, using fallback")
175
- return _fallback_escape(agent, fire)
176
 
177
  dist_to_fire = math.dist((agent.x, agent.y), (fire.x, fire.y))
178
  nearest_water = min(water_sources, key=lambda water: math.dist((agent.x, agent.y), (water.x, water.y))) if water_sources else None
@@ -204,7 +271,12 @@ Rules:
204
  - If the fire is too close, prioritize survival
205
  - If you already have water, move to the fire edge and fight it
206
  - If you are at a well, collect water immediately
207
- - Keep the message short, natural, and mission-focused
 
 
 
 
 
208
  - Respond with only valid JSON on one line
209
 
210
  Current state:
@@ -217,6 +289,7 @@ Current state:
217
  - Mode: {agent.mode}
218
  - Nearest water distance: {dist_to_water_display}
219
  - Coalition leader: {coalition_leader or 'none'}
 
220
 
221
  Recent radio:
222
  {radio_summary}
@@ -224,12 +297,14 @@ Recent radio:
224
  {state_summary}
225
 
226
  Return exactly:
227
- {{"action":"search_water|collect_water|extinguish_fire|escape|vote_for_leader","vote_for":null,"message":"short sentence","reasoning":"short sentence"}}"""
228
 
229
  requested_model = agent.model_name if hf_spaces.is_supported_model(agent.model_name) else hf_spaces.get_default_model_id()
230
  fallback_model = hf_spaces.get_default_model_id()
231
- models_to_try = [requested_model]
232
- if fallback_model not in models_to_try:
 
 
233
  models_to_try.append(fallback_model)
234
 
235
  for target_model in models_to_try:
@@ -239,16 +314,18 @@ Return exactly:
239
  print(f"[HF_INFERENCE] {agent.model_name}: raw response (first 300 chars): {raw_text[:300]}")
240
  decision = _extract_json_object(raw_text)
241
  if decision:
242
- normalized = _normalize_decision(decision, agent.model_name, dist_to_fire, agent.water_collected)
243
  if dist_to_water is not None and dist_to_water <= 60 and not agent.water_collected:
244
  normalized["action"] = "collect_water"
245
  elif agent.water_collected and dist_to_fire <= 350:
246
  normalized["action"] = "extinguish_fire"
247
  return normalized
248
  except Exception as exc:
 
 
249
  print(f"[HF_INFERENCE_ERROR] {agent.model_name} via {target_model}: {type(exc).__name__}: {exc}")
250
 
251
- return _fallback_escape(agent, fire)
252
 
253
 
254
  def _fallback_escape(agent, fire) -> dict:
 
2
  import math
3
  import os
4
  import random
5
+ import time
6
 
7
  import httpx
8
  from dotenv import load_dotenv
 
13
 
14
  _HF_API_TOKEN = (os.environ.get("HF_API_TOKEN") or os.environ.get("HUGGINGFACE_API_TOKEN") or "").strip()
15
  _HF_CHAT_URL = "https://router.huggingface.co/v1/chat/completions"
16
+ _MODEL_COOLDOWNS: dict[str, float] = {}
17
 
18
  MAX_AGENT_SPEED = 80
19
 
 
35
  }
36
 
37
 
38
+ def _pick_line(options: list[str], previous_message: str | None = None) -> str:
39
+ if not previous_message:
40
+ return random.choice(options)
41
+ previous = " ".join(previous_message.strip().split()).lower()
42
+ filtered = [option for option in options if option.lower() != previous]
43
+ return random.choice(filtered or options)
44
+
45
+
46
+ def _generate_chat_message(action: str, agent_name: str, fire_distance: float, has_water: bool, previous_message: str | None = None) -> str:
47
  action_messages = {
48
  "search_water": [
49
+ "I'm heading for the nearest well.",
50
+ "I need water first, then I'm coming back.",
51
+ "Give me a second, I'm making for the water.",
52
+ "Water first, then we push the fire.",
53
+ "I'm going for the well, stay alive.",
54
  ],
55
  "collect_water": [
56
+ "I'm at the well now, filling up.",
57
+ "Got water, turning back in a second.",
58
+ "Hold on, I'm grabbing water.",
59
+ "Tank's full, I'm heading back.",
60
+ "Water secured, let's make this count.",
61
  ],
62
  "extinguish_fire": [
63
+ "I've got water, I'm going in.",
64
+ "I'm on the fire line now, push with me.",
65
+ "Alright, I'm hitting the flames.",
66
+ "I'm close enough, pouring water now.",
67
+ "Keep moving, I'm taking a shot at the fire.",
68
  ],
69
  "escape": [
70
+ "Too hot here, I'm backing off.",
71
+ "Nope, that's way too close, I'm out.",
72
+ "I need space, falling back now.",
73
+ "I'm peeling away before this gets worse.",
74
+ "I'm not dying here, backing up.",
75
  ],
76
  "vote_for_leader": [
77
+ "Someone call it, we need one plan.",
78
+ "I'll follow a lead if somebody steps up.",
 
79
  "We need one caller right now.",
80
+ "Pick a lead so we stop wasting time.",
81
+ "I'm good with a leader, just make it clear.",
82
  ],
83
  }
84
  messages = action_messages.get(action, action_messages["escape"])
85
+ return _pick_line(messages, previous_message)
86
 
87
 
88
  def _build_fire_state_summary(agent, fire, all_agents) -> str:
 
144
  return parsed if isinstance(parsed, dict) else {}
145
 
146
 
147
+ def _is_robotic_message(message: str) -> bool:
148
+ lowered = message.lower().strip()
149
+ if not lowered:
150
+ return True
151
+ robotic_starts = (
152
+ "locate ",
153
+ "locating ",
154
+ "find ",
155
+ "finding ",
156
+ "search ",
157
+ "searching ",
158
+ "head ",
159
+ "heading ",
160
+ "move ",
161
+ "moving ",
162
+ "look ",
163
+ "looking ",
164
+ "nearest water",
165
+ )
166
+ return lowered.startswith(robotic_starts)
167
+
168
+
169
+ def _normalize_decision(decision: dict, agent_name: str, dist_to_fire: float, has_water: bool, previous_message: str | None = None) -> dict:
170
  action = decision.get("action", "escape")
171
  if action not in {"search_water", "collect_water", "extinguish_fire", "escape", "vote_for_leader"}:
172
  action = "escape"
173
 
174
  message = " ".join(str(decision.get("message", "")).strip().split())
175
+ if not message or _is_robotic_message(message):
176
+ message = _generate_chat_message(action, agent_name, dist_to_fire, has_water, previous_message)
177
+ elif previous_message and message.lower() == " ".join(previous_message.strip().split()).lower():
178
+ message = _generate_chat_message(action, agent_name, dist_to_fire, has_water, previous_message)
179
 
180
  vote_for = decision.get("vote_for")
181
  if vote_for is not None and not isinstance(vote_for, str):
 
193
  }
194
 
195
 
196
+ def _model_available(model_id: str) -> bool:
197
+ return _MODEL_COOLDOWNS.get(model_id, 0.0) <= time.monotonic()
198
+
199
+
200
+ def _mark_model_unavailable(model_id: str, seconds: int = 90) -> None:
201
+ _MODEL_COOLDOWNS[model_id] = time.monotonic() + seconds
202
+
203
+
204
  async def _request_model_response(target_model: str, prompt: str) -> str:
205
  payload = {
206
  "model": target_model,
 
216
  return _extract_message_content(data)
217
 
218
 
219
+ def _fallback_decision(agent, fire, dist_to_fire: float, dist_to_water: float | None) -> dict:
220
+ if dist_to_fire <= max(fire.radius + 20, 140):
221
+ action = "escape"
222
+ elif agent.water_collected and dist_to_fire <= 360:
223
+ action = "extinguish_fire"
224
+ elif not agent.water_collected and dist_to_water is not None and dist_to_water <= 60:
225
+ action = "collect_water"
226
+ elif getattr(agent, "is_leader", False) is False and dist_to_fire > 240 and random.random() < 0.08:
227
+ action = "vote_for_leader"
228
+ else:
229
+ action = "search_water"
230
+
231
+ return {
232
+ "message": _generate_chat_message(action, agent.model_name, dist_to_fire, agent.water_collected, getattr(agent, "last_message", None)),
233
+ "action": action,
234
+ "vote_for": None,
235
+ "reasoning": "Fallback: keep moving with the situation.",
236
+ }
237
+
238
+
239
  async def generate_fire_decision(agent, fire, water_sources, other_agents, bounds, recent_radio=None) -> dict:
240
  if not is_ready():
241
  print(f"[INFERENCE_FAIL] {agent.model_name}: HF token not ready, using fallback")
242
+ return _fallback_decision(agent, fire, math.dist((agent.x, agent.y), (fire.x, fire.y)), None)
243
 
244
  dist_to_fire = math.dist((agent.x, agent.y), (fire.x, fire.y))
245
  nearest_water = min(water_sources, key=lambda water: math.dist((agent.x, agent.y), (water.x, water.y))) if water_sources else None
 
271
  - If the fire is too close, prioritize survival
272
  - If you already have water, move to the fire edge and fight it
273
  - If you are at a well, collect water immediately
274
+ - Speak like a real teammate over a radio, not like a status dashboard
275
+ - Use normal conversational English in first person
276
+ - The message must sound casual, human, and alive
277
+ - Avoid robotic phrases like "locate nearest water source", "search for water", "coalition survival", "moving to water source"
278
+ - React to the moment and vary your wording from your previous line
279
+ - Keep the message to one short sentence, around 6 to 14 words
280
  - Respond with only valid JSON on one line
281
 
282
  Current state:
 
289
  - Mode: {agent.mode}
290
  - Nearest water distance: {dist_to_water_display}
291
  - Coalition leader: {coalition_leader or 'none'}
292
+ - Your previous line: {getattr(agent, 'last_message', None) or 'none yet'}
293
 
294
  Recent radio:
295
  {radio_summary}
 
297
  {state_summary}
298
 
299
  Return exactly:
300
+ {{"action":"search_water|collect_water|extinguish_fire|escape|vote_for_leader","vote_for":null,"message":"casual first-person sentence","reasoning":"short sentence"}}"""
301
 
302
  requested_model = agent.model_name if hf_spaces.is_supported_model(agent.model_name) else hf_spaces.get_default_model_id()
303
  fallback_model = hf_spaces.get_default_model_id()
304
+ models_to_try = []
305
+ if _model_available(requested_model):
306
+ models_to_try.append(requested_model)
307
+ if fallback_model not in models_to_try and _model_available(fallback_model):
308
  models_to_try.append(fallback_model)
309
 
310
  for target_model in models_to_try:
 
314
  print(f"[HF_INFERENCE] {agent.model_name}: raw response (first 300 chars): {raw_text[:300]}")
315
  decision = _extract_json_object(raw_text)
316
  if decision:
317
+ normalized = _normalize_decision(decision, agent.model_name, dist_to_fire, agent.water_collected, getattr(agent, "last_message", None))
318
  if dist_to_water is not None and dist_to_water <= 60 and not agent.water_collected:
319
  normalized["action"] = "collect_water"
320
  elif agent.water_collected and dist_to_fire <= 350:
321
  normalized["action"] = "extinguish_fire"
322
  return normalized
323
  except Exception as exc:
324
+ if getattr(exc, "response", None) is not None and getattr(exc.response, "status_code", None) == 402:
325
+ _mark_model_unavailable(target_model)
326
  print(f"[HF_INFERENCE_ERROR] {agent.model_name} via {target_model}: {type(exc).__name__}: {exc}")
327
 
328
+ return _fallback_decision(agent, fire, dist_to_fire, dist_to_water)
329
 
330
 
331
  def _fallback_escape(agent, fire) -> dict:
backend/app/groq_client.py CHANGED
@@ -3,6 +3,7 @@ import math
3
  import os
4
  import random
5
  from pathlib import Path
 
6
 
7
  import httpx
8
  from dotenv import load_dotenv
@@ -13,6 +14,7 @@ load_dotenv(Path(__file__).resolve().parents[1] / ".env")
13
 
14
  _HF_API_TOKEN = (os.environ.get("HF_API_TOKEN") or os.environ.get("HUGGINGFACE_API_TOKEN") or "").strip()
15
  _HF_CHAT_URL = "https://router.huggingface.co/v1/chat/completions"
 
16
 
17
  MAX_AGENT_SPEED = 80
18
 
@@ -34,41 +36,54 @@ def _headers() -> dict[str, str]:
34
  }
35
 
36
 
37
- def _generate_chat_message(action: str, agent_name: str, fire_distance: float, has_water: bool) -> str:
 
 
 
 
 
 
 
 
38
  action_messages = {
39
  "search_water": [
40
- f"{agent_name} is hunting for water...",
41
- f"{agent_name} is tracking the nearest well.",
42
- "Need water before this gets worse.",
43
- "Scanning for the fastest water route.",
 
44
  ],
45
  "collect_water": [
46
- f"{agent_name} is filling up now.",
47
- "Got the well, taking water.",
48
- "Water secured, moving out.",
49
- "That should be enough to fight back.",
 
50
  ],
51
  "extinguish_fire": [
52
- f"{agent_name} is pushing the fire line.",
53
- "Closing in with water.",
54
- "Time to hit the flames.",
55
- "Pressure on the fire now.",
 
56
  ],
57
  "escape": [
58
- f"{agent_name} is backing out.",
59
- "Too hot here, pulling away.",
60
- "Need space before the fire closes in.",
61
- "Resetting position and staying alive.",
 
62
  ],
63
  "vote_for_leader": [
64
- f"{agent_name} wants a leader in place.",
65
- "Coordination first, then pressure.",
66
- "Picking a lead so we stop wasting ticks.",
67
  "We need one caller right now.",
 
 
68
  ],
69
  }
70
  messages = action_messages.get(action, action_messages["escape"])
71
- return random.choice(messages)
72
 
73
 
74
  def _build_fire_state_summary(agent, fire, all_agents) -> str:
@@ -130,14 +145,38 @@ def _extract_json_object(text: str) -> dict:
130
  return parsed if isinstance(parsed, dict) else {}
131
 
132
 
133
- def _normalize_decision(decision: dict, agent_name: str, dist_to_fire: float, has_water: bool) -> dict:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
  action = decision.get("action", "escape")
135
  if action not in {"search_water", "collect_water", "extinguish_fire", "escape", "vote_for_leader"}:
136
  action = "escape"
137
 
138
  message = " ".join(str(decision.get("message", "")).strip().split())
139
- if not message:
140
- message = _generate_chat_message(action, agent_name, dist_to_fire, has_water)
 
 
141
 
142
  vote_for = decision.get("vote_for")
143
  if vote_for is not None and not isinstance(vote_for, str):
@@ -155,6 +194,14 @@ def _normalize_decision(decision: dict, agent_name: str, dist_to_fire: float, ha
155
  }
156
 
157
 
 
 
 
 
 
 
 
 
158
  async def _request_model_response(target_model: str, prompt: str) -> str:
159
  payload = {
160
  "model": target_model,
@@ -170,10 +217,30 @@ async def _request_model_response(target_model: str, prompt: str) -> str:
170
  return _extract_message_content(data)
171
 
172
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
173
  async def generate_fire_decision(agent, fire, water_sources, other_agents, bounds, recent_radio=None) -> dict:
174
  if not is_ready():
175
  print(f"[INFERENCE_FAIL] {agent.model_name}: HF token not ready, using fallback")
176
- return _fallback_escape(agent, fire)
177
 
178
  dist_to_fire = math.dist((agent.x, agent.y), (fire.x, fire.y))
179
  nearest_water = min(water_sources, key=lambda water: math.dist((agent.x, agent.y), (water.x, water.y))) if water_sources else None
@@ -205,7 +272,12 @@ Rules:
205
  - If the fire is too close, prioritize survival
206
  - If you already have water, move to the fire edge and fight it
207
  - If you are at a well, collect water immediately
208
- - Keep the message short, natural, and mission-focused
 
 
 
 
 
209
  - Respond with only valid JSON on one line
210
 
211
  Current state:
@@ -218,6 +290,7 @@ Current state:
218
  - Mode: {agent.mode}
219
  - Nearest water distance: {dist_to_water_display}
220
  - Coalition leader: {coalition_leader or 'none'}
 
221
 
222
  Recent radio:
223
  {radio_summary}
@@ -225,12 +298,14 @@ Recent radio:
225
  {state_summary}
226
 
227
  Return exactly:
228
- {{"action":"search_water|collect_water|extinguish_fire|escape|vote_for_leader","vote_for":null,"message":"short sentence","reasoning":"short sentence"}}"""
229
 
230
  requested_model = agent.model_name if hf_spaces.is_supported_model(agent.model_name) else hf_spaces.get_default_model_id()
231
  fallback_model = hf_spaces.get_default_model_id()
232
- models_to_try = [requested_model]
233
- if fallback_model not in models_to_try:
 
 
234
  models_to_try.append(fallback_model)
235
 
236
  for target_model in models_to_try:
@@ -240,16 +315,18 @@ Return exactly:
240
  print(f"[HF_INFERENCE] {agent.model_name}: raw response (first 300 chars): {raw_text[:300]}")
241
  decision = _extract_json_object(raw_text)
242
  if decision:
243
- normalized = _normalize_decision(decision, agent.model_name, dist_to_fire, agent.water_collected)
244
  if dist_to_water is not None and dist_to_water <= 60 and not agent.water_collected:
245
  normalized["action"] = "collect_water"
246
  elif agent.water_collected and dist_to_fire <= 350:
247
  normalized["action"] = "extinguish_fire"
248
  return normalized
249
  except Exception as exc:
 
 
250
  print(f"[HF_INFERENCE_ERROR] {agent.model_name} via {target_model}: {type(exc).__name__}: {exc}")
251
 
252
- return _fallback_escape(agent, fire)
253
 
254
 
255
  def _fallback_escape(agent, fire) -> dict:
 
3
  import os
4
  import random
5
  from pathlib import Path
6
+ import time
7
 
8
  import httpx
9
  from dotenv import load_dotenv
 
14
 
15
  _HF_API_TOKEN = (os.environ.get("HF_API_TOKEN") or os.environ.get("HUGGINGFACE_API_TOKEN") or "").strip()
16
  _HF_CHAT_URL = "https://router.huggingface.co/v1/chat/completions"
17
+ _MODEL_COOLDOWNS: dict[str, float] = {}
18
 
19
  MAX_AGENT_SPEED = 80
20
 
 
36
  }
37
 
38
 
39
+ def _pick_line(options: list[str], previous_message: str | None = None) -> str:
40
+ if not previous_message:
41
+ return random.choice(options)
42
+ previous = " ".join(previous_message.strip().split()).lower()
43
+ filtered = [option for option in options if option.lower() != previous]
44
+ return random.choice(filtered or options)
45
+
46
+
47
+ def _generate_chat_message(action: str, agent_name: str, fire_distance: float, has_water: bool, previous_message: str | None = None) -> str:
48
  action_messages = {
49
  "search_water": [
50
+ "I'm heading for the nearest well.",
51
+ "I need water first, then I'm coming back.",
52
+ "Give me a second, I'm making for the water.",
53
+ "Water first, then we push the fire.",
54
+ "I'm going for the well, stay alive.",
55
  ],
56
  "collect_water": [
57
+ "I'm at the well now, filling up.",
58
+ "Got water, turning back in a second.",
59
+ "Hold on, I'm grabbing water.",
60
+ "Tank's full, I'm heading back.",
61
+ "Water secured, let's make this count.",
62
  ],
63
  "extinguish_fire": [
64
+ "I've got water, I'm going in.",
65
+ "I'm on the fire line now, push with me.",
66
+ "Alright, I'm hitting the flames.",
67
+ "I'm close enough, pouring water now.",
68
+ "Keep moving, I'm taking a shot at the fire.",
69
  ],
70
  "escape": [
71
+ "Too hot here, I'm backing off.",
72
+ "Nope, that's way too close, I'm out.",
73
+ "I need space, falling back now.",
74
+ "I'm peeling away before this gets worse.",
75
+ "I'm not dying here, backing up.",
76
  ],
77
  "vote_for_leader": [
78
+ "Someone call it, we need one plan.",
79
+ "I'll follow a lead if somebody steps up.",
 
80
  "We need one caller right now.",
81
+ "Pick a lead so we stop wasting time.",
82
+ "I'm good with a leader, just make it clear.",
83
  ],
84
  }
85
  messages = action_messages.get(action, action_messages["escape"])
86
+ return _pick_line(messages, previous_message)
87
 
88
 
89
  def _build_fire_state_summary(agent, fire, all_agents) -> str:
 
145
  return parsed if isinstance(parsed, dict) else {}
146
 
147
 
148
+ def _is_robotic_message(message: str) -> bool:
149
+ lowered = message.lower().strip()
150
+ if not lowered:
151
+ return True
152
+ robotic_starts = (
153
+ "locate ",
154
+ "locating ",
155
+ "find ",
156
+ "finding ",
157
+ "search ",
158
+ "searching ",
159
+ "head ",
160
+ "heading ",
161
+ "move ",
162
+ "moving ",
163
+ "look ",
164
+ "looking ",
165
+ "nearest water",
166
+ )
167
+ return lowered.startswith(robotic_starts)
168
+
169
+
170
+ def _normalize_decision(decision: dict, agent_name: str, dist_to_fire: float, has_water: bool, previous_message: str | None = None) -> dict:
171
  action = decision.get("action", "escape")
172
  if action not in {"search_water", "collect_water", "extinguish_fire", "escape", "vote_for_leader"}:
173
  action = "escape"
174
 
175
  message = " ".join(str(decision.get("message", "")).strip().split())
176
+ if not message or _is_robotic_message(message):
177
+ message = _generate_chat_message(action, agent_name, dist_to_fire, has_water, previous_message)
178
+ elif previous_message and message.lower() == " ".join(previous_message.strip().split()).lower():
179
+ message = _generate_chat_message(action, agent_name, dist_to_fire, has_water, previous_message)
180
 
181
  vote_for = decision.get("vote_for")
182
  if vote_for is not None and not isinstance(vote_for, str):
 
194
  }
195
 
196
 
197
+ def _model_available(model_id: str) -> bool:
198
+ return _MODEL_COOLDOWNS.get(model_id, 0.0) <= time.monotonic()
199
+
200
+
201
+ def _mark_model_unavailable(model_id: str, seconds: int = 90) -> None:
202
+ _MODEL_COOLDOWNS[model_id] = time.monotonic() + seconds
203
+
204
+
205
  async def _request_model_response(target_model: str, prompt: str) -> str:
206
  payload = {
207
  "model": target_model,
 
217
  return _extract_message_content(data)
218
 
219
 
220
+ def _fallback_decision(agent, fire, dist_to_fire: float, dist_to_water: float | None) -> dict:
221
+ if dist_to_fire <= max(fire.radius + 20, 140):
222
+ action = "escape"
223
+ elif agent.water_collected and dist_to_fire <= 360:
224
+ action = "extinguish_fire"
225
+ elif not agent.water_collected and dist_to_water is not None and dist_to_water <= 60:
226
+ action = "collect_water"
227
+ elif getattr(agent, "is_leader", False) is False and dist_to_fire > 240 and random.random() < 0.08:
228
+ action = "vote_for_leader"
229
+ else:
230
+ action = "search_water"
231
+
232
+ return {
233
+ "message": _generate_chat_message(action, agent.model_name, dist_to_fire, agent.water_collected, getattr(agent, "last_message", None)),
234
+ "action": action,
235
+ "vote_for": None,
236
+ "reasoning": "Fallback: keep moving with the situation.",
237
+ }
238
+
239
+
240
  async def generate_fire_decision(agent, fire, water_sources, other_agents, bounds, recent_radio=None) -> dict:
241
  if not is_ready():
242
  print(f"[INFERENCE_FAIL] {agent.model_name}: HF token not ready, using fallback")
243
+ return _fallback_decision(agent, fire, math.dist((agent.x, agent.y), (fire.x, fire.y)), None)
244
 
245
  dist_to_fire = math.dist((agent.x, agent.y), (fire.x, fire.y))
246
  nearest_water = min(water_sources, key=lambda water: math.dist((agent.x, agent.y), (water.x, water.y))) if water_sources else None
 
272
  - If the fire is too close, prioritize survival
273
  - If you already have water, move to the fire edge and fight it
274
  - If you are at a well, collect water immediately
275
+ - Speak like a real teammate over a radio, not like a status dashboard
276
+ - Use normal conversational English in first person
277
+ - The message must sound casual, human, and alive
278
+ - Avoid robotic phrases like "locate nearest water source", "search for water", "coalition survival", "moving to water source"
279
+ - React to the moment and vary your wording from your previous line
280
+ - Keep the message to one short sentence, around 6 to 14 words
281
  - Respond with only valid JSON on one line
282
 
283
  Current state:
 
290
  - Mode: {agent.mode}
291
  - Nearest water distance: {dist_to_water_display}
292
  - Coalition leader: {coalition_leader or 'none'}
293
+ - Your previous line: {getattr(agent, 'last_message', None) or 'none yet'}
294
 
295
  Recent radio:
296
  {radio_summary}
 
298
  {state_summary}
299
 
300
  Return exactly:
301
+ {{"action":"search_water|collect_water|extinguish_fire|escape|vote_for_leader","vote_for":null,"message":"casual first-person sentence","reasoning":"short sentence"}}"""
302
 
303
  requested_model = agent.model_name if hf_spaces.is_supported_model(agent.model_name) else hf_spaces.get_default_model_id()
304
  fallback_model = hf_spaces.get_default_model_id()
305
+ models_to_try = []
306
+ if _model_available(requested_model):
307
+ models_to_try.append(requested_model)
308
+ if fallback_model not in models_to_try and _model_available(fallback_model):
309
  models_to_try.append(fallback_model)
310
 
311
  for target_model in models_to_try:
 
315
  print(f"[HF_INFERENCE] {agent.model_name}: raw response (first 300 chars): {raw_text[:300]}")
316
  decision = _extract_json_object(raw_text)
317
  if decision:
318
+ normalized = _normalize_decision(decision, agent.model_name, dist_to_fire, agent.water_collected, getattr(agent, "last_message", None))
319
  if dist_to_water is not None and dist_to_water <= 60 and not agent.water_collected:
320
  normalized["action"] = "collect_water"
321
  elif agent.water_collected and dist_to_fire <= 350:
322
  normalized["action"] = "extinguish_fire"
323
  return normalized
324
  except Exception as exc:
325
+ if getattr(exc, "response", None) is not None and getattr(exc.response, "status_code", None) == 402:
326
+ _mark_model_unavailable(target_model)
327
  print(f"[HF_INFERENCE_ERROR] {agent.model_name} via {target_model}: {type(exc).__name__}: {exc}")
328
 
329
+ return _fallback_decision(agent, fire, dist_to_fire, dist_to_water)
330
 
331
 
332
  def _fallback_escape(agent, fire) -> dict: