Spaces:
Sleeping
Sleeping
adityaverma977 commited on
Commit ·
9f7b0e1
1
Parent(s): f5cc0ba
Deploy backend from Rush-Agents-Rush
Browse files- .env.example +14 -0
- .gitignore +2 -0
- Dockerfile +7 -0
- README.md +40 -4
- app/__init__.py +0 -0
- app/groq_client.py +151 -0
- app/hf_spaces.py +108 -0
- app/main.py +189 -0
- app/models.py +120 -0
- app/movement.py +29 -0
- app/personality.py +20 -0
- app/simulation.py +324 -0
- requirements.txt +8 -0
.env.example
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Backend API Configuration
|
| 2 |
+
GROQ_API_KEY=your_groq_api_key_here
|
| 3 |
+
|
| 4 |
+
# Optional: HuggingFace API token for accessing HF Spaces models
|
| 5 |
+
HUGGINGFACE_API_TOKEN=your_huggingface_token_here
|
| 6 |
+
|
| 7 |
+
# CORS allowed origins (comma-separated)
|
| 8 |
+
ALLOWED_ORIGINS=http://localhost:3000,https://yourdomain.com
|
| 9 |
+
|
| 10 |
+
# Optional: Backend port
|
| 11 |
+
BACKEND_PORT=8000
|
| 12 |
+
|
| 13 |
+
# Optional: Environment
|
| 14 |
+
ENV=development
|
.gitignore
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.pyc
|
Dockerfile
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.11-slim
|
| 2 |
+
WORKDIR /app
|
| 3 |
+
COPY requirements.txt .
|
| 4 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 5 |
+
COPY app/ ./app/
|
| 6 |
+
EXPOSE 7860
|
| 7 |
+
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
|
README.md
CHANGED
|
@@ -1,10 +1,46 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
emoji: 🔥
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: docker
|
| 7 |
pinned: false
|
| 8 |
---
|
| 9 |
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: RUSH AGENTS RUSH Backend
|
| 3 |
emoji: 🔥
|
| 4 |
+
colorFrom: orange
|
| 5 |
+
colorTo: red
|
| 6 |
sdk: docker
|
| 7 |
pinned: false
|
| 8 |
---
|
| 9 |
|
| 10 |
+
# Rush Agents Rush Backend
|
| 11 |
+
|
| 12 |
+
FastAPI server driving the fire-suppression simulation.
|
| 13 |
+
|
| 14 |
+
## What It Does
|
| 15 |
+
|
| 16 |
+
- Accepts model selections and starts a new simulation.
|
| 17 |
+
- Places a fire on the map and generates water wells.
|
| 18 |
+
- Runs the tick-based AI loop with coalition voting, movement, and extinguishing.
|
| 19 |
+
- Streams state updates and events over WebSockets.
|
| 20 |
+
|
| 21 |
+
## Key Endpoints
|
| 22 |
+
|
| 23 |
+
- `GET /wake` - health and readiness check
|
| 24 |
+
- `GET /available-models` - list available models for the UI
|
| 25 |
+
- `POST /start-simulation` - create a new simulation
|
| 26 |
+
- `POST /place-fire` - place the fire and spawn water sources
|
| 27 |
+
- `WS /ws/{simulation_id}` - stream live simulation ticks
|
| 28 |
+
|
| 29 |
+
## Environment Variables
|
| 30 |
+
|
| 31 |
+
- `GROQ_API_KEY`: Required for agent decisions.
|
| 32 |
+
- `ALLOWED_ORIGINS`: CORS whitelist.
|
| 33 |
+
|
| 34 |
+
## Local Run
|
| 35 |
+
|
| 36 |
+
```bash
|
| 37 |
+
cd backend
|
| 38 |
+
pip install -r requirements.txt
|
| 39 |
+
python -m uvicorn app.main:app --reload --port 8000
|
| 40 |
+
```
|
| 41 |
+
|
| 42 |
+
## Notes
|
| 43 |
+
|
| 44 |
+
- Simulation state is in memory.
|
| 45 |
+
- Fire growth, extinguish rate, and movement are tuned in `app/simulation.py`.
|
| 46 |
+
- Model decisions are generated in `app/groq_client.py`.
|
app/__init__.py
ADDED
|
File without changes
|
app/groq_client.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import random
|
| 4 |
+
import math
|
| 5 |
+
from groq import AsyncGroq
|
| 6 |
+
from dotenv import load_dotenv
|
| 7 |
+
|
| 8 |
+
load_dotenv()

# Missing key -> _client stays None; is_ready() reports False and callers fall
# back to the scripted escape behavior instead of raising.
_GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
_client = AsyncGroq(api_key=_GROQ_API_KEY) if _GROQ_API_KEY else None

# Model used to generate every agent decision.
DEFAULT_DECISION_MODEL = "llama-3.1-8b-instant"
# px per tick; NOTE(review): duplicated in app/movement.py — keep in sync.
MAX_AGENT_SPEED = 80
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def is_ready():
    """Report whether the Groq client was configured (GROQ_API_KEY present)."""
    # _client is None exactly when no API key was found at import time.
    if _client is None:
        return False
    return True
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def _build_fire_state_summary(agent, fire, all_agents) -> str:
|
| 22 |
+
"""Build a state summary for the fire scenario."""
|
| 23 |
+
standings = []
|
| 24 |
+
for a in all_agents:
|
| 25 |
+
if not a.alive:
|
| 26 |
+
continue
|
| 27 |
+
dist = math.dist((a.x, a.y), (fire.x, fire.y))
|
| 28 |
+
standings.append({
|
| 29 |
+
"name": a.display_name,
|
| 30 |
+
"model": a.model_name,
|
| 31 |
+
"distance_from_fire": dist,
|
| 32 |
+
"x": a.x,
|
| 33 |
+
"y": a.y,
|
| 34 |
+
"has_water": a.water_collected,
|
| 35 |
+
"mode": a.mode,
|
| 36 |
+
})
|
| 37 |
+
|
| 38 |
+
standings.sort(key=lambda s: s['distance_from_fire'])
|
| 39 |
+
|
| 40 |
+
lines = ["Current standings:"]
|
| 41 |
+
for rank, s in enumerate(standings, 1):
|
| 42 |
+
water_str = " (carrying water)" if s['has_water'] else ""
|
| 43 |
+
lines.append(f" #{rank} {s['name']}: {s['distance_from_fire']:.0f}px from fire{water_str}")
|
| 44 |
+
|
| 45 |
+
return "\n".join(lines)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
async def generate_fire_decision(agent, fire, water_sources, other_agents, bounds) -> dict:
    """
    Fire scenario decision system.
    Actions: search_water, collect_water, extinguish_fire, escape, vote_for_leader

    Asks the Groq decision model for an action given the agent's surroundings,
    then hard-overrides the answer with proximity heuristics (collect when at a
    well, extinguish when carrying water near the fire). Falls back to a
    scripted "escape" decision when the client is unconfigured or the call fails.

    NOTE(review): the `bounds` parameter is never used in this function.
    """
    if not _client:
        # No API key configured: never attempt a network call.
        return _fallback_escape(agent, fire)

    dist_to_fire = math.dist((agent.x, agent.y), (fire.x, fire.y))
    nearest_water = min(water_sources, key=lambda w: math.dist((agent.x, agent.y), (w.x, w.y))) if water_sources else None
    dist_to_water = math.dist((agent.x, agent.y), (nearest_water.x, nearest_water.y)) if nearest_water else None

    # NOTE(review): filtering by model_name assumes names are unique per agent;
    # two agents sharing one model id would exclude each other — confirm.
    living_agents = [a for a in other_agents if a.alive and a.model_name != agent.model_name]
    state_summary = _build_fire_state_summary(agent, fire, [agent] + living_agents)

    coalition_leader = next((a.model_name for a in other_agents if a.is_leader), None)
    dist_to_water_display = f"{dist_to_water:.0f}px" if dist_to_water is not None else "unknown"
    system_prompt = f"""You are {agent.model_name}, an AI model in a critical wildfire survival scenario.

THE SCENARIO:
- A wildfire is spreading rapidly across the map
- Water sources (wells) are scattered around the area
- You can work alone or join a coalition with other AI models
- Coalition agents should elect a leader who coordinates the strategy
- If a leader exists, follow their plan: gather water, then move to the fire edge to extinguish
- To win: Find water → Collect it → Return to fire → Extinguish it together (or solo)
- If the fire consumes you, you lose

YOUR STRATEGIC OPTIONS EACH TICK:
1. "search_water" - Move toward the nearest water source
2. "collect_water" - Pick up water from a well (must be at a source)
3. "extinguish_fire" - Use collected water to fight the fire (must have water)
4. "escape" - Run away from the fire to survive
5. "vote_for_leader" - Vote for yourself or another model as coalition leader

IMPORTANT CONSIDERATIONS:
- If fire is very close (< 200px), prioritize escape or finding water
- If you have water, move to the fire edge and extinguish
- If you are near a water source (< 60px), collect it immediately
- Coalition mode requires coordination; vote strategically
- Solo mode means you act independently and don't wait for others

OUTPUT FORMAT - return ONLY valid JSON:
{{"action": "<search_water|collect_water|extinguish_fire|escape|vote_for_leader>", "vote_for": "<model_name if voting, else null>", "message": "<full English sentence>", "reasoning": "<one sentence>"}}

CURRENT STATE:
Your position: ({agent.x}, {agent.y})
Fire position: ({fire.x}, {fire.y})
Distance from fire: {dist_to_fire:.0f}px
Fire radius: {fire.radius:.0f}px
Fire intensity: {fire.intensity:.0f}%
Carrying water: {agent.water_collected}
Mode: {agent.mode} ({'joined a coalition' if agent.mode == 'coalition' else 'acting alone'})
Nearest water distance: {dist_to_water_display}
Coalition leader: {coalition_leader or 'none'}

{state_summary}

What do you do?"""

    try:
        completion = await _client.chat.completions.create(
            model=DEFAULT_DECISION_MODEL,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": "Make your decision."}
            ],
            # Forces the model to emit parseable JSON.
            response_format={"type": "json_object"},
            max_tokens=150,
            timeout=3.0
        )
        decision = json.loads(completion.choices[0].message.content)

        # Sanitize: anything outside the action vocabulary collapses to escape.
        action = decision.get("action", "escape")
        if action not in ["search_water", "collect_water", "extinguish_fire", "escape", "vote_for_leader"]:
            action = "escape"

        # Hard proximity overrides: standing at a well -> collect; carrying
        # water within 350px of the fire -> extinguish, regardless of the LLM's
        # stated choice. NOTE(review): this also overrides vote_for_leader.
        if dist_to_water is not None and dist_to_water <= 60 and not agent.water_collected:
            action = "collect_water"
        elif agent.water_collected and dist_to_fire <= 350:
            action = "extinguish_fire"

        return {
            "action": action,
            "vote_for": decision.get("vote_for"),
            "message": decision.get("message", "Moving strategically."),
            "reasoning": decision.get("reasoning", "Survival and teamwork.")
        }
    except Exception as e:
        # Any failure (timeout, bad JSON, API error) degrades to scripted escape.
        print(f"Error calling groq for {agent.model_name}: {e}")
        return _fallback_escape(agent, fire)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def _fallback_escape(agent, fire) -> dict:
|
| 142 |
+
"""Fallback escape behavior."""
|
| 143 |
+
dx = agent.x - fire.x
|
| 144 |
+
dy = agent.y - fire.y
|
| 145 |
+
dist = math.sqrt(dx**2 + dy**2) or 1
|
| 146 |
+
return {
|
| 147 |
+
"message": "Running to safety!",
|
| 148 |
+
"action": "escape",
|
| 149 |
+
"vote_for": None,
|
| 150 |
+
"reasoning": "Fallback: survive."
|
| 151 |
+
}
|
app/hf_spaces.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
HuggingFace Spaces integration for discovering and querying open-source models.
|
| 3 |
+
"""
|
| 4 |
+
import os
|
| 5 |
+
import httpx
|
| 6 |
+
from typing import Optional
|
| 7 |
+
|
| 8 |
+
# Optional HF token; when empty, query_hf_space_model() short-circuits to None.
HF_API_TOKEN = os.environ.get("HUGGINGFACE_API_TOKEN", "")

# Curated list of verified open-source models on HF Spaces that work reliably
# NOTE(review): several space_url values look speculative (the meta-llama and
# teknium Space URLs in particular) — confirm the Spaces actually exist before
# surfacing them in the UI.
KNOWN_SPACES_MODELS = [
    {
        "id": "tiiuae/Falcon-7B",
        "name": "Falcon-7B",
        "space_url": "https://huggingface.co/spaces/tiiuae/falcon-chat",
        "description": "7B parameter open model",
    },
    {
        "id": "meta-llama/Llama-2-7b",
        "name": "Llama-2-7B",
        "space_url": "https://huggingface.co/spaces/meta-llama/Llama-2-7b-chat",
        "description": "Meta's 7B model",
    },
    {
        "id": "mistralai/Mistral-7B",
        "name": "Mistral-7B",
        "space_url": "https://huggingface.co/spaces/mistralai/Mistral-7B-Instruct-v0.1",
        "description": "Mistral's 7B model",
    },
    {
        "id": "HuggingFaceH4/zephyr-7b",
        "name": "Zephyr-7B",
        "space_url": "https://huggingface.co/spaces/HuggingFaceH4/zephyr-7b-beta",
        "description": "Zephyr 7B fine-tuned model",
    },
    {
        "id": "teknium/OpenHermes-2.5-Mistral-7B",
        "name": "OpenHermes-7B",
        "space_url": "https://huggingface.co/spaces/teknium/OpenHermes-2.5-Mistral-7B",
        "description": "OpenHermes instruction-tuned 7B",
    },
]

# Groq models (built-in)
# NOTE(review): some of these ids (mixtral-8x7b-32768, gemma-7b-it,
# llama-3.1-70b-versatile) may have been deprecated by Groq — verify against
# the current Groq model catalog.
GROQ_MODELS = [
    {"id": "llama-3.1-8b-instant", "name": "Llama 3.1 8B", "backend": "groq"},
    {"id": "llama-3.1-70b-versatile", "name": "Llama 3.1 70B", "backend": "groq"},
    {"id": "mixtral-8x7b-32768", "name": "Mixtral 8x7B", "backend": "groq"},
    {"id": "gemma-7b-it", "name": "Gemma 7B", "backend": "groq"},
]
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
async def get_available_models() -> dict:
    """
    Get list of available models from Groq and HF Spaces.
    Returns both for frontend model selector.
    """
    groq_catalog = GROQ_MODELS
    spaces_catalog = KNOWN_SPACES_MODELS
    payload = {
        "groq_models": groq_catalog,
        "hf_spaces_models": spaces_catalog,
    }
    payload["total"] = len(groq_catalog) + len(spaces_catalog)
    return payload
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
async def query_hf_space_model(model_id: str, prompt: str) -> Optional[str]:
    """
    Query a model on HuggingFace Spaces.
    This is a fallback if we want to use HF spaces directly.
    Note: HF spaces may have rate limits and require authentication.

    Returns the generated text on success; returns None when the token is
    missing, the model id is not in KNOWN_SPACES_MODELS, the request fails,
    or the response shape is not the expected list-of-dicts.
    """
    if not HF_API_TOKEN:
        # No token configured: silently unavailable by design.
        return None

    # Try to find the space URL for this model
    space = next((m for m in KNOWN_SPACES_MODELS if m["id"] == model_id), None)
    if not space:
        return None

    try:
        # This would hit the HF inference API
        # For now, we focus on Groq which is more reliable
        async with httpx.AsyncClient(timeout=5.0) as client:
            headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
            response = await client.post(
                "https://api-inference.huggingface.co/models/" + model_id,
                json={"inputs": prompt},
                headers=headers,
            )
            if response.status_code == 200:
                result = response.json()
                # Extract generated text from response
                # (text-generation endpoints return [{"generated_text": ...}])
                if isinstance(result, list) and len(result) > 0:
                    return result[0].get("generated_text", "")
    except Exception as e:
        # Best-effort helper: log and fall through to None.
        print(f"Error querying HF space {model_id}: {e}")

    return None
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def get_model_display_name(model_id: str) -> str:
    """Get a clean display name from model ID."""
    # First preference: the curated name from either catalog.
    catalog = GROQ_MODELS + KNOWN_SPACES_MODELS
    curated = next((m["name"] for m in catalog if m["id"] == model_id), None)
    if curated is not None:
        return curated

    # Fallback: derive a short label from the raw id,
    # e.g. "org/foo-bar-7b" -> "Foo".
    tail = model_id.rsplit("/", 1)[-1]
    return tail.split("-")[0].capitalize()
|
app/main.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import json
|
| 3 |
+
import math
|
| 4 |
+
import random
|
| 5 |
+
import uuid
|
| 6 |
+
import os
|
| 7 |
+
import time
|
| 8 |
+
from typing import Optional
|
| 9 |
+
from fastapi import FastAPI, HTTPException, WebSocket, WebSocketDisconnect
|
| 10 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 11 |
+
from pydantic import BaseModel, Field
|
| 12 |
+
from dotenv import load_dotenv
|
| 13 |
+
|
| 14 |
+
load_dotenv()
|
| 15 |
+
|
| 16 |
+
from .models import SimulationState, AgentModel, TickResponse, FireScenario, WaterSource
|
| 17 |
+
from .simulation import SimulationEngine, TICK_INTERVAL_SECONDS
|
| 18 |
+
from . import groq_client
|
| 19 |
+
from . import hf_spaces
|
| 20 |
+
|
| 21 |
+
app = FastAPI(title="Unhinged 2.0", version="0.2.0")

# Comma-separated CORS origin whitelist; defaults to the local dev frontend.
ALLOWED_ORIGINS = os.environ.get(
    "ALLOWED_ORIGINS",
    "http://localhost:3000"
).split(",")

app.add_middleware(
    CORSMiddleware,
    allow_origins=ALLOWED_ORIGINS,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# All simulation state is held in this process's memory — a restart drops
# every running simulation (see README "Notes").
active_simulations: dict[str, SimulationState] = {}
# Process start time, reported by /wake as uptime_seconds.
START_TIME = time.time()
|
| 38 |
+
|
| 39 |
+
class StartSimulationRequest(BaseModel):
    # 2-6 model ids to pit against each other; "fire" is the only scenario
    # accepted by /start-simulation.
    model_names: list[str] = Field(..., min_length=2, max_length=6)
    scenario: str = "fire"
    map_width: int = 1200   # canvas size in px
    map_height: int = 800

class StartSimulationResponse(BaseModel):
    # id is echoed back so the client can open /ws/{simulation_id}
    simulation_id: str
    state: SimulationState

class PlaceFireRequest(BaseModel):
    # click coordinates for the fire origin, in map px
    simulation_id: str
    x: int
    y: int

class TickRequest(BaseModel):
    # NOTE(review): not referenced by any endpoint in this module — ticks are
    # driven by the WebSocket loop; possibly dead code.
    simulation_id: str
|
| 56 |
+
|
| 57 |
+
@app.get("/")
async def root():
    """Service banner / liveness endpoint: name, status, and Groq availability."""
    return {
        "service": "rush-agents-backend",
        "status": "ok",
        "groq_available": groq_client.is_ready(),
    }
|
| 64 |
+
|
| 65 |
+
@app.get("/wake")
async def wake():
    """Readiness probe used to warm the Space before starting a simulation.

    Reports Groq availability and process uptime in whole seconds.
    """
    return {
        "warm": True,
        "groq_available": groq_client.is_ready(),
        "uptime_seconds": int(time.time() - START_TIME),
    }
|
| 72 |
+
|
| 73 |
+
@app.get("/available-models")
async def get_available_models():
    """Get list of available models (Groq + HF Spaces) for the UI."""
    # Delegates to hf_spaces, which merges the Groq catalog with the curated
    # HF Spaces entries and a total count.
    return await hf_spaces.get_available_models()
|
| 77 |
+
|
| 78 |
+
@app.post("/start-simulation", response_model=StartSimulationResponse)
async def start_simulation(req: StartSimulationRequest):
    """Create a new in-memory simulation in the "waiting_for_scenario" state.

    The fire itself is placed later via POST /place-fire. Raises 400 for any
    scenario other than "fire".
    """
    if req.scenario != "fire":
        raise HTTPException(status_code=400, detail="Only 'fire' scenario supported.")

    # Spawn one agent per requested model at random, mutually-spaced positions.
    agents = _spawn_agents(req.model_names, req.map_width, req.map_height)

    state = SimulationState(
        simulation_id=str(uuid.uuid4()),
        scenario=req.scenario,
        map_width=req.map_width,
        map_height=req.map_height,
        agents=agents,
        fire=None,
        water_sources=[],
        round=0,
        status="waiting_for_scenario",
    )

    # NOTE(review): stored only in this process's dict — lost on restart, and
    # entries are never evicted, so abandoned simulations accumulate.
    active_simulations[state.simulation_id] = state
    return StartSimulationResponse(simulation_id=state.simulation_id, state=state)
|
| 99 |
+
|
| 100 |
+
def _safe_randint(lo: int, hi: int) -> int:
    """random.randint that tolerates an inverted range by swapping the bounds.

    The water-placement arithmetic in place_fire() can produce an empty range
    for fire positions near the band edges (e.g. 200 < x < 300 makes
    randint(100, x - 200) invalid), which would raise ValueError and 500 the
    request; swapping keeps the endpoint robust for any click position.
    """
    if lo > hi:
        lo, hi = hi, lo
    return random.randint(lo, hi)


@app.post("/place-fire", response_model=SimulationState)
def place_fire(req: PlaceFireRequest):
    """Place the fire at the clicked location and scatter 3-5 water wells.

    Transitions the simulation from "waiting_for_scenario" to "running".
    Raises 409 if the fire was already placed or the run finished; 404 (via
    _get_or_404) if the simulation id is unknown.
    """
    sim = _get_or_404(req.simulation_id)
    if sim.status != "waiting_for_scenario":
        raise HTTPException(status_code=409, detail="Fire already placed or simulation finished.")

    # Create fire at clicked location
    sim.fire = FireScenario(x=req.x, y=req.y)

    # Generate 3-5 water sources scattered around the map, biased away from
    # the fire: each well lands in a band left of the fire or (coin flip)
    # right of it, with an edge fallback band when the fire sits near a border.
    num_sources = random.randint(3, 5)
    for i in range(num_sources):
        water_x = _safe_randint(100, req.x - 200) if req.x > 200 else _safe_randint(0, 400)
        if random.random() > 0.5:
            water_x = _safe_randint(req.x + 200, sim.map_width - 100) if req.x < sim.map_width - 200 else _safe_randint(sim.map_width - 400, sim.map_width)
        water_y = _safe_randint(100, sim.map_height - 100)
        sim.water_sources.append(WaterSource(id=f"water_{i}", x=water_x, y=water_y))

    sim.status = "running"
    return sim
|
| 120 |
+
|
| 121 |
+
@app.websocket("/ws/{simulation_id}")
async def simulation_ws(websocket: WebSocket, simulation_id: str):
    """Stream simulation ticks to the client until the run finishes or it disconnects.

    Loop behavior:
    - While the fire has not been placed ("waiting_for_scenario"), poll once a second.
    - Each iteration: run one engine tick, store the new state, send the TickResponse.
    - Closes with code 1000 when the simulation finishes, 1008 for an unknown id.
    """
    await websocket.accept()
    sim = active_simulations.get(simulation_id)
    if not sim:
        await websocket.close(code=1008)
        return

    try:
        while True:
            if sim.status == "waiting_for_scenario":
                await asyncio.sleep(1)
                continue

            if sim.status == "finished":
                await websocket.send_json({"type": "finished", "state": sim.model_dump()})
                await websocket.close(code=1000)
                return

            # NOTE(review): a fresh engine is built every iteration from the
            # local `sim` reference, while the ticked state is stored under the
            # dict key below. If SimulationEngine.tick() returns a *new* state
            # object instead of mutating `sim` in place, this loop keeps
            # re-ticking the stale object — confirm against app/simulation.py.
            engine = SimulationEngine(sim)
            result = await engine.tick()
            active_simulations[simulation_id] = result.state

            # DEBUG: log outgoing TickResponse summary for troubleshooting
            # NOTE(review): per-tick print; consider the logging module / a
            # debug flag before production traffic.
            try:
                agent_states = [(a.model_name, a.alive) for a in result.state.agents]
            except Exception:
                agent_states = str(result.state)
            print(f"WS_SEND sim={simulation_id} round={result.round} agents={agent_states} events={len(result.events)}")

            await websocket.send_json(result.model_dump())

            if result.state.status == "finished":
                await websocket.close(code=1000)
                return

            # Pace the loop at the engine's configured tick interval.
            await asyncio.sleep(TICK_INTERVAL_SECONDS)

    except WebSocketDisconnect:
        # Client went away; nothing to clean up — state stays in the dict.
        pass
|
| 161 |
+
|
| 162 |
+
def _spawn_agents(model_names: list[str], width: int, height: int) -> list[AgentModel]:
    """Place one agent per model name at random positions at least 100px apart.

    Uses rejection sampling: up to 100 attempts per agent to find a spot
    >= min_gap from all previously placed agents; the for/else branch accepts
    the last sampled candidate when no spaced position was found.

    NOTE(review): randint(100, width - 100) assumes width and height > 200 —
    smaller maps would raise ValueError; confirm the request model enforces it.
    """
    min_gap = 100
    positions = []
    agents = []
    for name in model_names:
        for _ in range(100):
            x = random.randint(100, width - 100)
            y = random.randint(100, height - 100)
            if all(math.dist((x, y), p) >= min_gap for p in positions):
                positions.append((x, y))
                break
        else:
            # Gave up on spacing: keep the last sampled (x, y) anyway.
            positions.append((x, y))

        agents.append(AgentModel(
            model_name=name,
            # Short label, e.g. "org/foo-bar-7b" -> "Foo" (same scheme as
            # hf_spaces.get_model_display_name's fallback).
            display_name=name.split("/")[-1].split("-")[0].capitalize(),
            x=positions[-1][0],
            y=positions[-1][1],
            alive=True
        ))
    return agents
|
| 184 |
+
|
| 185 |
+
def _get_or_404(simulation_id: str) -> SimulationState:
    """Look up a simulation by id; raise HTTP 404 when it does not exist."""
    try:
        return active_simulations[simulation_id]
    except KeyError:
        raise HTTPException(status_code=404, detail="Simulation not found")
|
app/models.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Literal, Optional, Union
|
| 2 |
+
from pydantic import BaseModel
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class AgentModel(BaseModel):
    """One competing AI agent on the map, including fire/coalition bookkeeping."""

    # NOTE(review): pydantic v2 reserves the "model_" prefix (protected
    # namespaces); model_name may trigger a warning — confirm it is handled.
    model_name: str  # full id, e.g. "llama-3.1-8b-instant"
    display_name: str  # short label shown on map
    x: int
    y: int
    alive: bool = True
    allied_with: Optional[str] = None  # model_name of ally (if stacked)
    has_proposed_alliance: bool = False
    last_message: Optional[str] = None
    distance_to_fire: Optional[float] = None
    # New fields for fire/coalition mechanics
    water_collected: bool = False  # carrying water
    is_leader: bool = False  # elected coalition leader
    # list of allied agent model_names (pydantic copies mutable defaults
    # per instance, so the [] default is safe here)
    coalition_members: list[str] = []
    mode: Literal["solo", "coalition"] = "coalition"  # agent's chosen path
    status: Literal["searching", "collecting_water", "extinguishing_fire", "escaping", "idle"] = "idle"
    vote_for: Optional[str] = None  # who this agent voted for as leader
    extinguish_score: float = 0.0  # total fire intensity reduced by this agent
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class FireScenario(BaseModel):
    """The fire: a circle centered at (x, y) that grows until extinguished."""

    x: int
    y: int
    radius: float = 50.0  # current fire radius
    intensity: float = 100.0  # 0-100; when 0, fire is out
    growth_rate: float = 3.0  # px per tick
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class WaterSource(BaseModel):
    """A well agents can draw water from before returning to fight the fire."""

    id: str  # unique id (e.g. "water_0" from place_fire)
    x: int
    y: int
    water_amount: float = 50.0  # how much water available
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class SimulationState(BaseModel):
    """Full snapshot of one simulation, serialized to clients over the WebSocket."""

    simulation_id: str
    scenario: str  # "fire" (was "volcano")
    map_width: int
    map_height: int
    agents: list[AgentModel]
    fire: Optional[FireScenario] = None  # None until /place-fire is called
    water_sources: list[WaterSource] = []
    round: int = 0  # tick counter
    # lifecycle: "waiting_for_scenario" -> "running" (set by /place-fire);
    # "finished" ends the WebSocket stream
    status: str = "waiting_for_scenario"
    winner_model: Optional[str] = None
    coalition_leader: Optional[str] = None  # elected leader
    coalition_members: list[str] = []  # all coalition members
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# Event models
# Each event carries a fixed "type" literal so the frontend can switch on it;
# all events are delivered in TickResponse.events.

class DeathEvent(BaseModel):
    type: Literal["death"] = "death"
    model: str

class MessageEvent(BaseModel):
    type: Literal["message"] = "message"
    model: str
    content: str

class AllianceProposalEvent(BaseModel):
    type: Literal["alliance_proposal"] = "alliance_proposal"
    from_model: str
    to_model: str

class AllianceAcceptEvent(BaseModel):
    type: Literal["alliance_accept"] = "alliance_accept"
    model_a: str
    model_b: str
    stacked: bool = True

class AllianceRejectEvent(BaseModel):
    type: Literal["alliance_reject"] = "alliance_reject"
    from_model: str
    to_model: str

class LeadershipVoteEvent(BaseModel):
    type: Literal["leadership_vote"] = "leadership_vote"
    voter: str
    candidate: str

class LeaderElectedEvent(BaseModel):
    type: Literal["leader_elected"] = "leader_elected"
    leader: str
    coalition_members: list[str]

class WaterCollectedEvent(BaseModel):
    type: Literal["water_collected"] = "water_collected"
    model: str
    water_source_id: str

class FireExtinguishedEvent(BaseModel):
    type: Literal["fire_extinguished"] = "fire_extinguished"
    extinguished_by: list[str]  # models that contributed
    fire_intensity: float

class FireSpreadEvent(BaseModel):
    type: Literal["fire_spread"] = "fire_spread"
    new_radius: float
    new_intensity: float


class ChatEntry(BaseModel):
    # One line of the running chat transcript (TickResponse.chat).
    agent_id: str
    message: str
    tick: int
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class TickResponse(BaseModel):
    """Payload streamed to the client after every engine tick."""

    simulation_id: str
    round: int
    # NOTE(review): plain Union without Field(discriminator="type") — pydantic
    # will try each member in order; a discriminated union would be stricter.
    events: list[Union[DeathEvent, MessageEvent, AllianceProposalEvent, AllianceAcceptEvent,
                       AllianceRejectEvent, LeadershipVoteEvent, LeaderElectedEvent,
                       WaterCollectedEvent, FireExtinguishedEvent, FireSpreadEvent]]
    chat: list[ChatEntry]
    state: SimulationState
|
app/movement.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
# Per-axis displacement cap, px per tick.
MAX_AGENT_SPEED = 80

def apply_movement(agent, dx: int, dy: int, bounds: tuple) -> tuple[int, int]:
    """Clamp a requested displacement to the speed limit and the canvas.

    Returns the agent's new (x, y) as ints; the agent object is not mutated.
    bounds is (max_x, max_y).
    """
    # Limit each axis to [-MAX_AGENT_SPEED, MAX_AGENT_SPEED].
    step_x = min(max(dx, -MAX_AGENT_SPEED), MAX_AGENT_SPEED)
    step_y = min(max(dy, -MAX_AGENT_SPEED), MAX_AGENT_SPEED)

    # Tentative destination, then keep it inside [0, bounds] on both axes.
    dest_x = agent.x + step_x
    dest_y = agent.y + step_y
    dest_x = min(max(dest_x, 0), bounds[0])
    dest_y = min(max(dest_y, 0), bounds[1])

    return (int(dest_x), int(dest_y))
|
| 20 |
+
|
| 21 |
+
def is_in_lava(agent, volcano) -> bool:
    """True when the agent stands within the lava radius; False with no volcano."""
    if not volcano:
        return False
    separation = math.dist((agent.x, agent.y), (volcano.x, volcano.y))
    return separation <= volcano.radius
|
| 25 |
+
|
| 26 |
+
def distance_to_lava_edge(agent, volcano) -> float:
    """Signed distance from the agent to the lava edge (negative = inside).

    Returns 1000.0 as an "effectively far away" sentinel when no volcano exists.
    """
    if not volcano:
        return 1000.0
    center_gap = math.dist((agent.x, agent.y), (volcano.x, volcano.y))
    return center_gap - volcano.radius
|
app/personality.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import httpx
|
| 2 |
+
from . import groq_client
|
| 3 |
+
|
| 4 |
+
async def _fetch_model_card(model_name: str) -> str:
    """Best-effort fetch of the model's Hugging Face README (first 2000 chars).

    The model names used here are Groq model IDs, which may not map to a
    real HF repo with a README.md — on any failure (network error, timeout,
    non-200 response) a generic one-line blurb is returned instead.
    """
    card_url = f"https://huggingface.co/{model_name}/raw/main/README.md"
    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            reply = await client.get(card_url)
            if reply.status_code == 200:
                return reply.text[:2000]
    except Exception:
        # Swallow all fetch errors deliberately: the card is optional flavor.
        pass
    return f"A powerful AI model known as {model_name}."
|
| 17 |
+
|
| 18 |
+
async def generate_personality(model_name: str) -> dict:
    """Build a personality dict for *model_name* from its model card via Groq."""
    card_text = await _fetch_model_card(model_name)
    return await groq_client.generate_personality(model_name, card_text)
|
app/simulation.py
ADDED
|
@@ -0,0 +1,324 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import json
|
| 3 |
+
import math
|
| 4 |
+
import random
|
| 5 |
+
from typing import Union
|
| 6 |
+
|
| 7 |
+
from .models import (
|
| 8 |
+
AgentModel,
|
| 9 |
+
DeathEvent,
|
| 10 |
+
MessageEvent,
|
| 11 |
+
LeadershipVoteEvent,
|
| 12 |
+
LeaderElectedEvent,
|
| 13 |
+
WaterCollectedEvent,
|
| 14 |
+
FireExtinguishedEvent,
|
| 15 |
+
FireSpreadEvent,
|
| 16 |
+
SimulationState,
|
| 17 |
+
TickResponse,
|
| 18 |
+
ChatEntry,
|
| 19 |
+
)
|
| 20 |
+
from . import groq_client
|
| 21 |
+
from . import movement
|
| 22 |
+
|
| 23 |
+
FIRE_GROWTH_RATE = 1.0  # radius growth per tick
FIRE_INTENSITY_GROWTH = 0.9  # intensity per tick
BASE_EXTINGUISH_RATE = 15.0  # baseline intensity reduction per agent
MIN_EXTINGUISH_RATE = 8.0  # floor on per-agent intensity reduction per tick
MAX_EXTINGUISH_RATE = 28.0  # ceiling on per-agent intensity reduction per tick
TICK_INTERVAL_SECONDS = 3  # wall-clock pacing between ticks
WATER_PICKUP_RANGE = 40  # max distance (map units) to collect from a water source
EXTINGUISH_RANGE = 45  # extra reach beyond the fire radius for dousing
FIRE_SAFE_BUFFER = 10  # standoff distance agents keep from the fire edge
|
| 32 |
+
|
| 33 |
+
class SimulationEngine:
    """Drives one fire-survival simulation over a SimulationState.

    Each tick: collect LLM decisions for all living agents, run coalition
    leadership voting, execute water/extinguish/escape actions, grow the
    fire, apply extinguishing, kill agents caught in the fire, and check
    win conditions.
    """

    def __init__(self, state: SimulationState) -> None:
        self.state = state

    def _pick_message(self, action: str) -> str:
        """Return a random canned status line matching *action* (generic fallback)."""
        options = {
            "search_water": [
                "Scanning for the nearest well.",
                "Heading toward water to stock up.",
                "Tracking a water source now.",
                "Moving to secure water for the team.",
            ],
            "collect_water": [
                "Water secured. Moving to the fire line.",
                "Got water. Heading to the fire edge.",
                "Collected water. Time to extinguish.",
            ],
            "extinguish_fire": [
                "Pouring water at the fire edge.",
                "Holding position and dousing flames.",
                "Suppressing the fire with water.",
            ],
            "escape": [
                "Falling back to a safer position.",
                "Repositioning away from the flames.",
                "Retreating to avoid the fire front.",
            ],
        }
        return random.choice(options.get(action, ["Moving strategically."]))

    def _move_toward(self, agent: AgentModel, target_x: float, target_y: float, stop_distance: float = 0) -> None:
        """Step *agent* toward (target_x, target_y), capped at MAX_AGENT_SPEED.

        Stops *stop_distance* short of the target and clamps the result to
        the map bounds. Mutates agent.x / agent.y in place.
        """
        dx = target_x - agent.x
        dy = target_y - agent.y
        dist = math.sqrt(dx**2 + dy**2) or 1  # avoid zero-division when already at target
        if dist <= stop_distance:
            return
        step = min(movement.MAX_AGENT_SPEED, dist - stop_distance)
        agent.x += int((dx / dist) * step)
        agent.y += int((dy / dist) * step)
        agent.x = max(0, min(agent.x, self.state.map_width))
        agent.y = max(0, min(agent.y, self.state.map_height))

    async def tick(self) -> TickResponse:
        """Advance the simulation by one round and return the tick payload.

        Main simulation loop:
        1. Get decisions from all living agents
        2. Handle coalition leadership voting
        3. Execute agent actions (search water, collect, extinguish, escape)
        4. Grow fire
        5. Extinguish fire if agents with water are present
        6. Kill agents in fire
        7. Check win condition

        Raises:
            ValueError: if the simulation is not in the "running" state.
        """
        if self.state.status != "running":
            raise ValueError(f"Cannot tick a simulation with status '{self.state.status}'.")

        fire = self.state.fire
        assert fire is not None, "Fire must be placed before ticking."

        events = []
        bounds = (self.state.map_width, self.state.map_height)
        living_agents = [a for a in self.state.agents if a.alive]

        # 1. Ask every living agent's LLM for a decision, concurrently.
        decisions = await asyncio.gather(
            *[groq_client.generate_fire_decision(agent, fire, self.state.water_sources, living_agents, bounds)
              for agent in living_agents],
            return_exceptions=True
        )

        # Keyed by model_name — assumes model names are unique per simulation.
        decision_map = {}
        for agent, decision in zip(living_agents, decisions):
            if isinstance(decision, Exception):
                # LLM call failed: substitute a deterministic escape decision.
                decision = groq_client._fallback_escape(agent, fire)
            decision_map[agent.model_name] = decision

        # 2. Leadership voting phase (runs each tick until a leader is elected).
        if not self.state.coalition_leader:
            vote_events = await self._voting_phase(living_agents, decision_map)
            events.extend(vote_events)

        # 3. Execute actions; also collects the per-agent chat lines.
        action_events, chat_entries = await self._execute_actions(living_agents, decision_map, fire)
        events.extend(action_events)

        # 4. Grow fire (intensity is capped at 100).
        fire.radius += FIRE_GROWTH_RATE
        fire.intensity = min(fire.intensity + FIRE_INTENSITY_GROWTH, 100.0)
        if fire.intensity > 0:
            events.append(FireSpreadEvent(new_radius=fire.radius, new_intensity=fire.intensity))

        # 5. Extinguish fire if agents with water are in range.
        events.extend(self._check_extinguish(living_agents, fire))

        # 6. Kill agents caught inside the fire radius.
        events.extend(self._kill_agents_in_fire(living_agents, fire))

        # 7. Advance the round counter and check win conditions.
        self.state.round += 1
        living_count = len([a for a in self.state.agents if a.alive])

        if fire.intensity <= 0:
            # Fire extinguished — credit the top extinguisher(s).
            self.state.status = "finished"
            top_score = max((a.extinguish_score for a in self.state.agents), default=0)
            top_agents = [a.model_name for a in self.state.agents if a.extinguish_score == top_score and top_score > 0]
            if top_agents:
                self.state.winner_model = f"Top extinguisher: {', '.join(top_agents)} ({top_score:.1f} impact)"
            else:
                self.state.winner_model = "Fire extinguished"
        elif living_count <= 1:
            # At most one agent left standing.
            self.state.status = "finished"
            winner = next((a.model_name for a in self.state.agents if a.alive), None)
            self.state.winner_model = winner or "No survivors"

        return TickResponse(
            simulation_id=self.state.simulation_id,
            round=self.state.round,
            events=events,
            # Fix: chat entries were previously built but discarded (always []).
            chat=chat_entries,
            state=self.state
        )

    async def _voting_phase(self, agents, decision_map):
        """Tally each agent's 'vote_for' choice and elect a coalition leader.

        The candidate with the most votes becomes leader; all voters join
        the coalition. Returns the vote/election events produced.
        """
        events = []

        # Gather votes: candidate name -> vote count.
        votes = {}
        for agent in agents:
            decision = decision_map.get(agent.model_name, {})
            vote_for = decision.get("vote_for")
            if vote_for:
                votes[vote_for] = votes.get(vote_for, 0) + 1
                events.append(LeadershipVoteEvent(voter=agent.model_name, candidate=vote_for))

        # Elect a leader if any votes were cast for a living agent.
        if votes:
            leader_name = max(votes, key=votes.get)
            leader_agent = next((a for a in agents if a.model_name == leader_name), None)
            if leader_agent:
                for agent in agents:
                    agent.mode = "coalition"
                leader_agent.is_leader = True
                self.state.coalition_leader = leader_name
                coalition = [a.model_name for a in agents if a.mode == "coalition"]
                self.state.coalition_members = coalition
                events.append(LeaderElectedEvent(leader=leader_name, coalition_members=coalition))
                events.append(MessageEvent(model=leader_name, content="I'll lead us to victory! Let's find water and extinguish this fire."))

        return events

    async def _execute_actions(self, agents, decision_map, fire):
        """Execute each agent's action for the tick.

        The LLM-suggested action is overridden by guardrails so on-screen
        behavior stays consistent: flee if too close to the fire, use
        carried water, pick up water in range, otherwise search for water.

        Returns:
            (events, chat_entries) — the events to broadcast and the chat
            lines produced this tick.
        """
        events = []
        chat_entries = []

        for agent in agents:
            decision = decision_map.get(agent.model_name, {})
            message = decision.get("message", "Moving to safety.")

            nearest_water = self._find_nearest_water(agent, self.state.water_sources)
            dist_to_fire = math.dist((agent.x, agent.y), (fire.x, fire.y))
            dist_to_water = None
            if nearest_water:
                dist_to_water = math.dist((agent.x, agent.y), (nearest_water.x, nearest_water.y))

            # Guardrails: danger first, then use carried water, then logistics.
            if dist_to_fire <= fire.radius + FIRE_SAFE_BUFFER:
                action = "escape"
            elif agent.water_collected:
                action = "extinguish_fire"
            elif dist_to_water is not None and dist_to_water <= WATER_PICKUP_RANGE:
                action = "collect_water"
            else:
                action = "search_water"

            if action == "collect_water":
                if nearest_water and dist_to_water is not None and dist_to_water <= WATER_PICKUP_RANGE:
                    agent.water_collected = True
                    agent.status = "collecting_water"
                    events.append(WaterCollectedEvent(model=agent.model_name, water_source_id=nearest_water.id))
                    message = self._pick_message("collect_water")
                else:
                    # Defensive: move toward water if somehow out of pickup range.
                    agent.status = "searching"
                    if nearest_water:
                        self._move_toward(agent, nearest_water.x, nearest_water.y)
                    message = self._pick_message("search_water")

            elif action == "extinguish_fire":
                if agent.water_collected:
                    # Hold position just outside the fire edge and douse.
                    agent.status = "extinguishing_fire"
                    target_dist = max(fire.radius + FIRE_SAFE_BUFFER, 0)
                    self._move_toward(agent, fire.x, fire.y, stop_distance=target_dist)
                    message = self._pick_message("extinguish_fire")
                else:
                    # Defensive: guardrails should prevent reaching here without water.
                    agent.status = "searching"
                    message = "Need water before I can extinguish."

            elif action == "search_water":
                agent.status = "searching"
                if nearest_water:
                    self._move_toward(agent, nearest_water.x, nearest_water.y)
                    message = self._pick_message("search_water")

            elif action == "escape":
                agent.status = "escaping"
                # Move directly away from the fire center at full speed.
                dx = agent.x - fire.x
                dy = agent.y - fire.y
                dist = math.sqrt(dx**2 + dy**2) or 1
                agent.x += int((dx / dist) * movement.MAX_AGENT_SPEED)
                agent.y += int((dy / dist) * movement.MAX_AGENT_SPEED)
                agent.x = max(0, min(agent.x, self.state.map_width))
                agent.y = max(0, min(agent.y, self.state.map_height))
                message = self._pick_message("escape")

            agent.last_message = message
            events.append(MessageEvent(model=agent.model_name, content=message))
            chat_entries.append(ChatEntry(agent_id=agent.model_name, message=message, tick=self.state.round))

        return events, chat_entries

    def _find_nearest_water(self, agent, water_sources):
        """Return the water source closest to *agent*, or None if there are none."""
        if not water_sources:
            return None
        return min(water_sources, key=lambda w: math.dist((agent.x, agent.y), (w.x, w.y)))

    def _check_extinguish(self, agents, fire):
        """Apply extinguishing from agents carrying water near the fire edge.

        The per-agent rate scales inversely with the number of living agents
        (clamped to [MIN, MAX]_EXTINGUISH_RATE). Extinguishers spend their
        water and accrue extinguish_score. Returns the events produced.
        """
        events = []
        agents_with_water = []
        for agent in agents:
            if not (agent.water_collected and agent.status == "extinguishing_fire"):
                continue
            dist_to_fire = math.dist((agent.x, agent.y), (fire.x, fire.y))
            if dist_to_fire <= fire.radius + EXTINGUISH_RANGE:
                agents_with_water.append(agent)

        if agents_with_water:
            living_count = len([a for a in agents if a.alive]) or 1
            scale = max(0.5, min(2.0, 2.0 / living_count))
            per_agent_rate = BASE_EXTINGUISH_RATE * scale
            per_agent_rate = max(MIN_EXTINGUISH_RATE, min(MAX_EXTINGUISH_RATE, per_agent_rate))
            reduction = len(agents_with_water) * per_agent_rate
            fire.intensity = max(fire.intensity - reduction, 0)

            extinguisher_names = [a.model_name for a in agents_with_water]
            events.append(FireExtinguishedEvent(extinguished_by=extinguisher_names, fire_intensity=fire.intensity))
            for agent in agents_with_water:
                agent.extinguish_score += per_agent_rate
                events.append(MessageEvent(model=agent.model_name, content="Pouring water on the fire! Intensity dropping."))
                # Water is consumed by the attempt.
                agent.water_collected = False

        return events

    def _kill_agents_in_fire(self, agents, fire):
        """Mark agents strictly inside the fire radius as dead; return events."""
        events = []

        for agent in agents:
            if not agent.alive:
                continue

            dist_to_fire = math.dist((agent.x, agent.y), (fire.x, fire.y))

            # Agent dies if inside the fire radius.
            if dist_to_fire < fire.radius:
                agent.alive = False
                events.append(DeathEvent(model=agent.model_name))
                events.append(MessageEvent(model=agent.model_name, content="No!!! The fire got me..."))

        return events
|
requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi>=0.136.0
|
| 2 |
+
uvicorn[standard]>=0.30.0
|
| 3 |
+
websockets>=12.0
|
| 4 |
+
groq>=0.11.0
|
| 5 |
+
httpx>=0.27.0
|
| 6 |
+
python-dotenv>=1.0.0
|
| 7 |
+
pydantic>=2.7.0
|
| 8 |
+
|