import json
import math
import os
import random
import time
import httpx
from dotenv import load_dotenv
from . import hf_spaces
# Load variables from a local .env file before reading the environment.
load_dotenv()
# API credentials: accept either env var name, normalized to a stripped string.
_HF_API_TOKEN = (os.environ.get("HF_API_TOKEN") or os.environ.get("HUGGINGFACE_API_TOKEN") or "").strip()
# OpenAI-compatible chat-completions endpoint on the Hugging Face router.
_HF_CHAT_URL = "https://router.huggingface.co/v1/chat/completions"
# model_id -> time.monotonic() deadline; the model is skipped until then.
_MODEL_COOLDOWNS: dict[str, float] = {}
# NOTE(review): not referenced in this file — presumably consumed by the game loop; confirm.
MAX_AGENT_SPEED = 80
# One-time startup diagnostics so missing credentials are visible in logs.
print(f"[GROQ_CLIENT_INIT] HF_API_TOKEN present: {bool(_HF_API_TOKEN)}")
if not _HF_API_TOKEN:
    print("[GROQ_CLIENT_INIT] WARNING: No HF API token found! Set HF_API_TOKEN or HUGGINGFACE_API_TOKEN env var.")
def is_ready():
    """Report whether an HF API token was found at import time."""
    return _HF_API_TOKEN != ""
def _headers() -> dict[str, str]:
    """Build auth headers for the HF router; empty dict when no token is set."""
    if _HF_API_TOKEN:
        return {
            "Authorization": f"Bearer {_HF_API_TOKEN}",
            "Content-Type": "application/json",
        }
    return {}
def _pick_line(options: list[str], previous_message: str | None = None) -> str:
if not previous_message:
return random.choice(options)
previous = " ".join(previous_message.strip().split()).lower()
filtered = [option for option in options if option.lower() != previous]
return random.choice(filtered or options)
def _generate_chat_message(action: str, agent_name: str, fire_distance: float, has_water: bool, previous_message: str | None = None) -> str:
    """Return a canned radio line matching *action*, avoiding an exact repeat.

    agent_name, fire_distance and has_water are part of the signature for
    interface compatibility but do not influence which line is chosen.
    Unknown actions fall back to the "escape" pool.
    """
    canned_lines = {
        "search_water": [
            "I'm heading for the nearest well.",
            "I need water first, then I'm coming back.",
            "Give me a second, I'm making for the water.",
            "Water first, then we push the fire.",
            "I'm going for the well, stay alive.",
        ],
        "collect_water": [
            "I'm at the well now, filling up.",
            "Got water, turning back in a second.",
            "Hold on, I'm grabbing water.",
            "Tank's full, I'm heading back.",
            "Water secured, let's make this count.",
        ],
        "extinguish_fire": [
            "I've got water, I'm going in.",
            "I'm on the fire line now, push with me.",
            "Alright, I'm hitting the flames.",
            "I'm close enough, pouring water now.",
            "Keep moving, I'm taking a shot at the fire.",
        ],
        "escape": [
            "Too hot here, I'm backing off.",
            "Nope, that's way too close, I'm out.",
            "I need space, falling back now.",
            "I'm peeling away before this gets worse.",
            "I'm not dying here, backing up.",
        ],
        "vote_for_leader": [
            "Someone call it, we need one plan.",
            "I'll follow a lead if somebody steps up.",
            "We need one caller right now.",
            "Pick a lead so we stop wasting time.",
            "I'm good with a leader, just make it clear.",
        ],
    }
    pool = canned_lines[action] if action in canned_lines else canned_lines["escape"]
    return _pick_line(pool, previous_message)
def _build_fire_state_summary(agent, fire, all_agents) -> str:
standings = []
for other in all_agents:
if not other.alive:
continue
distance = math.dist((other.x, other.y), (fire.x, fire.y))
standings.append({
"name": other.display_name,
"distance_from_fire": distance,
"has_water": other.water_collected,
})
standings.sort(key=lambda item: item["distance_from_fire"])
lines = ["Current standings:"]
for index, item in enumerate(standings, 1):
suffix = " (carrying water)" if item["has_water"] else ""
lines.append(f"#{index} {item['name']}: {item['distance_from_fire']:.0f}px from fire{suffix}")
return "\n".join(lines)
def _extract_message_content(payload) -> str:
choices = payload.get("choices") or []
if not choices or not isinstance(choices[0], dict):
return ""
message = choices[0].get("message") or {}
content = message.get("content")
if isinstance(content, str):
return content.strip()
if isinstance(content, list):
parts = []
for item in content:
if isinstance(item, dict) and item.get("type") == "text" and isinstance(item.get("text"), str):
parts.append(item["text"])
return "".join(parts).strip()
return ""
def _extract_json_object(text: str) -> dict:
if not text:
return {}
cleaned = text.strip()
if cleaned.startswith("```"):
cleaned = cleaned.replace("```json", "").replace("```", "").strip()
start = cleaned.find("{")
end = cleaned.rfind("}") + 1
if start < 0 or end <= start:
return {}
try:
candidate = cleaned[start:end]
parsed = json.loads(candidate)
except json.JSONDecodeError:
return {}
return parsed if isinstance(parsed, dict) else {}
def _is_robotic_message(message: str) -> bool:
lowered = message.lower().strip()
if not lowered:
return True
robotic_starts = (
"locate ",
"locating ",
"find ",
"finding ",
"search ",
"searching ",
"head ",
"heading ",
"move ",
"moving ",
"look ",
"looking ",
"nearest water",
)
return lowered.startswith(robotic_starts)
def _normalize_decision(decision: dict, agent_name: str, dist_to_fire: float, has_water: bool, previous_message: str | None = None) -> dict:
    """Validate and clean a raw LLM decision dict.

    Unknown actions collapse to "escape"; empty, robotic, or repeated
    messages are replaced with a canned line; vote_for must be a string or
    None; message and reasoning are whitespace-normalized and capped at 220
    characters.
    """
    allowed_actions = {"search_water", "collect_water", "extinguish_fire", "escape", "vote_for_leader"}
    action = decision.get("action", "escape")
    if action not in allowed_actions:
        action = "escape"
    message = " ".join(str(decision.get("message", "")).strip().split())
    previous_normalized = " ".join(previous_message.strip().split()).lower() if previous_message else None
    needs_canned_line = not message or _is_robotic_message(message)
    if not needs_canned_line and previous_normalized is not None:
        # Exact repeat of the previous line also gets replaced.
        needs_canned_line = message.lower() == previous_normalized
    if needs_canned_line:
        message = _generate_chat_message(action, agent_name, dist_to_fire, has_water, previous_message)
    vote_for = decision.get("vote_for")
    if not isinstance(vote_for, str):
        vote_for = None
    reasoning = " ".join(str(decision.get("reasoning", "")).strip().split()) or "Survival and teamwork."
    return {
        "action": action,
        "vote_for": vote_for,
        "message": message[:220],
        "reasoning": reasoning[:220],
    }
def _model_available(model_id: str) -> bool:
    """A model is available once its cooldown deadline has passed."""
    return time.monotonic() >= _MODEL_COOLDOWNS.get(model_id, 0.0)
def _mark_model_unavailable(model_id: str, seconds: int = 90) -> None:
    """Put *model_id* on cooldown for *seconds* from now."""
    deadline = time.monotonic() + seconds
    _MODEL_COOLDOWNS[model_id] = deadline
async def _request_model_response(target_model: str, prompt: str) -> str:
    """Send one user prompt to the HF chat endpoint and return the reply text.

    Raises httpx.HTTPStatusError on a non-2xx response; the model reply is
    extracted with _extract_message_content.
    """
    request_body = {
        "model": target_model,
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 220,
        "temperature": 0.4,
    }
    async with httpx.AsyncClient(timeout=20.0) as client:
        response = await client.post(_HF_CHAT_URL, headers=_headers(), json=request_body)
        response.raise_for_status()
        payload = response.json()
        return _extract_message_content(payload)
def _fallback_decision(agent, fire, dist_to_fire: float, dist_to_water: float | None) -> dict:
    """Heuristic decision used when the LLM call is unavailable or fails.

    Priority: escape the danger zone, fight the fire if carrying water and
    close enough, collect water when standing at a well, occasionally call
    for a leader vote, otherwise go look for water.
    """
    danger_radius = max(fire.radius + 20, 140)
    if dist_to_fire <= danger_radius:
        action = "escape"
    elif agent.water_collected and dist_to_fire <= 360:
        action = "extinguish_fire"
    elif not agent.water_collected and dist_to_water is not None and dist_to_water <= 60:
        action = "collect_water"
    elif getattr(agent, "is_leader", False) is False and dist_to_fire > 240 and random.random() < 0.08:
        # Non-leaders far from the fire sometimes propose a leader vote.
        action = "vote_for_leader"
    else:
        action = "search_water"
    radio_line = _generate_chat_message(action, agent.model_name, dist_to_fire, agent.water_collected, getattr(agent, "last_message", None))
    return {
        "message": radio_line,
        "action": action,
        "vote_for": None,
        "reasoning": "Fallback: keep moving with the situation.",
    }
async def generate_fire_decision(agent, fire, water_sources, other_agents, bounds, recent_radio=None) -> dict:
    """Decide the agent's next move by querying an HF-hosted chat model.

    Builds a situational prompt, tries the agent's own model first (falling
    back to the default model), and normalizes the model's JSON reply.
    Falls back to the local heuristic when no token is configured, every
    model fails, or the reply cannot be parsed.  Returns a dict with
    "action", "vote_for", "message" and "reasoning" keys.
    Note: `bounds` is accepted but not read in this function.
    """
    if not is_ready():
        # No API token configured: skip the network call entirely.
        print(f"[INFERENCE_FAIL] {agent.model_name}: HF token not ready, using fallback")
        return _fallback_decision(agent, fire, math.dist((agent.x, agent.y), (fire.x, fire.y)), None)
    # Geometry of the current situation.
    dist_to_fire = math.dist((agent.x, agent.y), (fire.x, fire.y))
    nearest_water = min(water_sources, key=lambda water: math.dist((agent.x, agent.y), (water.x, water.y))) if water_sources else None
    dist_to_water = math.dist((agent.x, agent.y), (nearest_water.x, nearest_water.y)) if nearest_water else None
    # Context fed to the model: other living agents, recent chat, current leader.
    living_agents = [other for other in other_agents if other.alive and other.model_name != agent.model_name]
    state_summary = _build_fire_state_summary(agent, fire, [agent] + living_agents)
    radio_summary = "\n".join(recent_radio or []) if recent_radio else "(no recent chat yet)"
    coalition_leader = next((other.model_name for other in other_agents if other.is_leader), None)
    dist_to_water_display = f"{dist_to_water:.0f}px" if dist_to_water is not None else "unknown"
    prompt = f"""You are {agent.model_name} in a wildfire survival simulation.
Scenario:
- A wildfire is spreading across the map
- Water wells are scattered around the area
- Agents can coordinate as a coalition and may vote for a leader
- Winning means getting water and using it to extinguish the fire
- Dying in the fire means losing
Allowed actions:
- search_water
- collect_water
- extinguish_fire
- escape
- vote_for_leader
Rules:
- If the fire is too close, prioritize survival
- If you already have water, move to the fire edge and fight it
- If you are at a well, collect water immediately
- Speak like a real teammate over a radio, not like a status dashboard
- Use normal conversational English in first person
- The message must sound casual, human, and alive
- Avoid robotic phrases like "locate nearest water source", "search for water", "coalition survival", "moving to water source"
- React to the moment and vary your wording from your previous line
- Keep the message to one short sentence, around 6 to 14 words
- Respond with only valid JSON on one line
Current state:
- Position: ({agent.x}, {agent.y})
- Fire position: ({fire.x}, {fire.y})
- Distance from fire: {dist_to_fire:.0f}px
- Fire radius: {fire.radius:.0f}px
- Fire intensity: {fire.intensity:.0f}%
- Carrying water: {agent.water_collected}
- Mode: {agent.mode}
- Nearest water distance: {dist_to_water_display}
- Coalition leader: {coalition_leader or 'none'}
- Your previous line: {getattr(agent, 'last_message', None) or 'none yet'}
Recent radio:
{radio_summary}
{state_summary}
Return exactly:
{{"action":"search_water|collect_water|extinguish_fire|escape|vote_for_leader","vote_for":null,"message":"casual first-person sentence","reasoning":"short sentence"}}"""
    # Prefer the agent's own model when supported; otherwise use the default.
    requested_model = agent.model_name if hf_spaces.is_supported_model(agent.model_name) else hf_spaces.get_default_model_id()
    fallback_model = hf_spaces.get_default_model_id()
    # Skip any model currently on cooldown (set after an HTTP 402 below).
    models_to_try = []
    if _model_available(requested_model):
        models_to_try.append(requested_model)
    if fallback_model not in models_to_try and _model_available(fallback_model):
        models_to_try.append(fallback_model)
    for target_model in models_to_try:
        try:
            print(f"[HF_INFERENCE] {agent.model_name} -> calling {target_model}")
            raw_text = await _request_model_response(target_model, prompt)
            print(f"[HF_INFERENCE] {agent.model_name}: raw response (first 300 chars): {raw_text[:300]}")
            decision = _extract_json_object(raw_text)
            if decision:
                normalized = _normalize_decision(decision, agent.model_name, dist_to_fire, agent.water_collected, getattr(agent, "last_message", None))
                # Hard overrides: standing at a well without water, or being
                # close to the fire while carrying water, beats the model's choice.
                if dist_to_water is not None and dist_to_water <= 60 and not agent.water_collected:
                    normalized["action"] = "collect_water"
                elif agent.water_collected and dist_to_fire <= 350:
                    normalized["action"] = "extinguish_fire"
                return normalized
        except Exception as exc:
            # HTTP 402 (Payment Required): put that model on cooldown.
            if getattr(exc, "response", None) is not None and getattr(exc.response, "status_code", None) == 402:
                _mark_model_unavailable(target_model)
            print(f"[HF_INFERENCE_ERROR] {agent.model_name} via {target_model}: {type(exc).__name__}: {exc}")
    # Every candidate model failed or returned unparseable output.
    return _fallback_decision(agent, fire, dist_to_fire, dist_to_water)
def _fallback_escape(agent, fire) -> dict:
return {
"message": "Running to safety!",
"action": "escape",
"vote_for": None,
"reasoning": "Fallback: survive.",
}
|