"""FastAPI server β€” serves the simulation state and handles player input."""

from __future__ import annotations

import asyncio
import base64
import logging
import os
import sys
from contextlib import asynccontextmanager
from pathlib import Path
from typing import Optional

import httpx
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles

try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    pass

from soci.engine.llm import create_llm_client, PROVIDER_GROQ, PROVIDER_GEMINI, PROVIDER_OLLAMA, PROVIDER_CLAUDE, PROVIDER_NN
from soci.engine.simulation import Simulation
from soci.persistence.database import Database
from soci.persistence.snapshots import load_simulation, save_simulation
from soci.world.city import City
from soci.world.clock import SimClock
from soci.api.routes import router
from soci.api.websocket import ws_router

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
    stream=sys.stdout,
)
logger = logging.getLogger(__name__)

# Global simulation instance (shared across requests)
_simulation: Optional[Simulation] = None
_database: Optional[Database] = None
_sim_task: Optional[asyncio.Task] = None
_sim_paused: bool = False
_sim_speed: float = 1.0  # Multiplier on tick delay: 1.0 = normal, 0.5 = faster, 2.0 = slower
_llm_provider: str = ""  # Track which provider is active
_llm_call_probability: float = 1.0  # 0.0–1.0; set per-provider on startup, adjustable via slider


def get_simulation() -> Simulation:
    assert _simulation is not None, "Simulation not initialized"
    return _simulation


def get_database() -> Database:
    assert _database is not None, "Database not initialized"
    return _database


def get_llm_provider() -> str:
    return _llm_provider


def get_llm_call_probability() -> float:
    return _llm_call_probability


def set_llm_call_probability(value: float) -> None:
    global _llm_call_probability, _simulation
    _llm_call_probability = max(0.0, min(1.0, value))
    if _simulation is not None:
        _simulation.llm_call_probability = _llm_call_probability


async def switch_llm_provider(provider: str, model: Optional[str] = None) -> None:
    """Hot-swap the LLM client on the running simulation."""
    global _llm_provider, _simulation
    assert _simulation is not None, "Simulation not initialized"
    new_llm = create_llm_client(provider=provider, model=model)
    _simulation.llm = new_llm
    _llm_provider = provider
    logger.info(f"LLM provider switched to: {provider}/{model or 'default'} ({new_llm.__class__.__name__})")


async def simulation_loop(sim: Simulation, db: Database, tick_delay: float = 2.0) -> None:
    """Background task that runs the simulation continuously."""
    global _sim_paused, _sim_speed
    # Groq: 30 req/min, Gemini free: 15 RPM - these need rate limiting.
    # NN and Ollama are local and don't need it.
    is_rate_limited = _llm_provider in (PROVIDER_GROQ, PROVIDER_GEMINI)
    if is_rate_limited:
        tick_delay = 4.0  # Longer ticks to stay under rate limit

    while True:
        try:
            if _sim_paused:
                await asyncio.sleep(0.5)
                continue

            # At high speeds, limit LLM calls to keep ticks fast
            if _sim_speed <= 0.05:
                # 50x: skip LLM entirely, pure routine mode
                sim._skip_llm_this_tick = True
                sim._max_llm_calls_this_tick = 0
            elif _sim_speed <= 0.15:
                # 10x: max 1 conversation per tick (clear any skip flag left over from 50x)
                sim._skip_llm_this_tick = False
                sim._max_convos_this_tick = 1
                sim._max_llm_calls_this_tick = 2 if is_rate_limited else 0
            elif _sim_speed <= 0.35:
                # 5x: max 2 conversations per tick
                sim._skip_llm_this_tick = False
                sim._max_convos_this_tick = 2
                sim._max_llm_calls_this_tick = 3 if is_rate_limited else 0
            else:
                sim._skip_llm_this_tick = False
                if is_rate_limited:
                    # Rate-limited providers: tight budget - probability slider does the fine-tuning.
                    # Gemini free tier: 4 RPM, ~1500 RPD -> budget=2 + prob=0.10 ≈ 15 calls/h.
                    # Budget=2 so 1 action + 1 conversation can co-exist per tick.
                    sim._max_convos_this_tick = 1
                    sim._max_llm_calls_this_tick = 2
                else:
                    # Ollama / Claude: soft cap to keep ticks responsive
                    sim._max_convos_this_tick = 3
                    sim._max_llm_calls_this_tick = 10

            # Apply the runtime probability slider every tick
            sim.llm_call_probability = _llm_call_probability

            await sim.tick()

            # Auto-save every 24 ticks (~6 sim-hours)
            if sim.clock.total_ticks % 24 == 0:
                await save_simulation(sim, db, "autosave")

            # Scale the delay by speed; at very high speeds just yield to the event loop
            delay = tick_delay * _sim_speed
            if delay > 0.05:
                await asyncio.sleep(delay)
            else:
                await asyncio.sleep(0)  # Yield to event loop
        except asyncio.CancelledError:
            logger.info("Simulation loop cancelled")
            await save_simulation(sim, db, "autosave")
            break
        except Exception as e:
            logger.error(f"Simulation tick error: {e}", exc_info=True)
            await asyncio.sleep(5)  # Wait before retrying


async def load_state_from_github(data_dir: Path) -> bool:
    """Fetch autosave.json from the simulation-state branch on GitHub.

    Reads from GITHUB_STATE_BRANCH (default: "simulation-state") so pushes
    never touch the master branch and never trigger Render auto-deploys.

    Env vars:
        GITHUB_TOKEN        - personal access token with repo read/write
        GITHUB_OWNER        - repo owner, e.g. "alice"  (preferred, no slash)
        GITHUB_REPO_NAME    - repo name,  e.g. "soci"   (preferred, no slash)
        GITHUB_REPO         - "owner/repo" fallback for existing setups
        GITHUB_STATE_BRANCH - branch name (default: "simulation-state")
        GITHUB_STATE_FILE   - path inside repo (default: "state/autosave.json")
    """
    token = os.environ.get("GITHUB_TOKEN", "")
    owner = os.environ.get("GITHUB_OWNER", "")
    repo_name = os.environ.get("GITHUB_REPO_NAME", "")
    repo = f"{owner}/{repo_name}" if owner and repo_name else os.environ.get("GITHUB_REPO", "")
    if not token or not repo:
        return False
    path = os.environ.get("GITHUB_STATE_FILE", "state/autosave.json")
    branch = os.environ.get("GITHUB_STATE_BRANCH", "simulation-state")
    headers = {"Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json"}
    try:
        async with httpx.AsyncClient() as client:
            resp = await client.get(
                f"https://api.github.com/repos/{repo}/contents/{path}",
                params={"ref": branch},
                headers=headers,
                timeout=30.0,
            )
            if resp.status_code == 404:
                logger.info(f"No GitHub state on branch '{branch}' β€” starting fresh")
                return False
            resp.raise_for_status()
            content = base64.b64decode(resp.json()["content"]).decode("utf-8").strip()
            if not content:
                logger.warning("GitHub state file is empty β€” starting fresh")
                return False
            local_path = data_dir / "snapshots" / "autosave.json"
            local_path.parent.mkdir(parents=True, exist_ok=True)
            local_path.write_text(content, encoding="utf-8")
            logger.info(f"Loaded state from GitHub branch '{branch}' ({len(content):,} bytes)")
            return True
    except Exception as e:
        logger.warning(f"Could not load state from GitHub: {e}")
        return False


async def save_state_to_github(data_dir: Path) -> bool:
    """Push autosave.json to the simulation-state branch (never touches master)."""
    token = os.environ.get("GITHUB_TOKEN", "")
    owner = os.environ.get("GITHUB_OWNER", "")
    repo_name = os.environ.get("GITHUB_REPO_NAME", "")
    repo = f"{owner}/{repo_name}" if owner and repo_name else os.environ.get("GITHUB_REPO", "")
    if not token or not repo:
        return False
    path = os.environ.get("GITHUB_STATE_FILE", "state/autosave.json")
    branch = os.environ.get("GITHUB_STATE_BRANCH", "simulation-state")
    local_path = data_dir / "snapshots" / "autosave.json"
    if not local_path.exists():
        logger.warning("No autosave.json to push to GitHub")
        return False
    try:
        content_bytes = local_path.read_bytes()
        encoded = base64.b64encode(content_bytes).decode("ascii")
        headers = {"Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json"}
        async with httpx.AsyncClient() as client:
            # Fetch current file SHA on the state branch (needed to update)
            sha: Optional[str] = None
            get_resp = await client.get(
                f"https://api.github.com/repos/{repo}/contents/{path}",
                params={"ref": branch},
                headers=headers,
                timeout=30.0,
            )
            if get_resp.status_code == 200:
                sha = get_resp.json().get("sha")
            elif get_resp.status_code == 404:
                # Branch or file doesn't exist yet - create the branch from master
                ref_resp = await client.get(
                    f"https://api.github.com/repos/{repo}/git/ref/heads/master",
                    headers=headers,
                    timeout=15.0,
                )
                if ref_resp.status_code == 200:
                    master_sha = ref_resp.json()["object"]["sha"]
                    await client.post(
                        f"https://api.github.com/repos/{repo}/git/refs",
                        headers=headers,
                        json={"ref": f"refs/heads/{branch}", "sha": master_sha},
                        timeout=15.0,
                    )
                    logger.info(f"Created GitHub branch '{branch}' for state storage")

            body: dict = {
                "message": "chore: save simulation state",
                "content": encoded,
                "branch": branch,
            }
            if sha:
                body["sha"] = sha

            put_resp = await client.put(
                f"https://api.github.com/repos/{repo}/contents/{path}",
                headers=headers,
                json=body,
                timeout=60.0,
            )
            put_resp.raise_for_status()
            logger.info(f"Saved state to GitHub branch '{branch}' ({len(content_bytes):,} bytes)")
            return True
    except Exception as e:
        logger.warning(f"Could not save state to GitHub: {e}")
        return False


def _choose_provider() -> str:
    """Let the user choose an LLM provider on startup.

    Priority: SOCI_PROVIDER env var > LLM_PROVIDER env var > interactive choice (NN by default).
    """
    # Check explicit env vars first
    provider = os.environ.get("SOCI_PROVIDER", "").lower() or os.environ.get("LLM_PROVIDER", "").lower()
    if provider in ("nn", "claude", "groq", "gemini", "ollama"):
        return provider

    # Check which keys are available
    has_groq = bool(os.environ.get("GROQ_API_KEY"))
    has_gemini = bool(os.environ.get("GEMINI_API_KEY"))

    # NN is always available (local ONNX model, no API key).
    options = [("nn", "Soci Agent NN (local ONNX, free, fast)")]
    if has_groq:
        options.append(("groq", "Groq (free tier, 30 req/min)"))
    if has_gemini:
        options.append(("gemini", "Gemini (free tier, 15 req/min via AI Studio)"))
    options.append(("ollama", "Ollama (local LLM)"))

    # If only NN + ollama, just use NN
    if len(options) <= 2:
        chosen = options[0][0]
        print(f"  LLM Provider: {options[0][1]}")
        return chosen

    # Interactive selection
    print("\n  Choose LLM provider:")
    for i, (key, desc) in enumerate(options, 1):
        print(f"    {i}. {desc}")

    try:
        choice = input(f"  Enter choice [1-{len(options)}] (default: 1): ").strip()
        idx = int(choice) - 1 if choice else 0
        if 0 <= idx < len(options):
            chosen = options[idx][0]
        else:
            chosen = options[0][0]
    except (ValueError, EOFError):
        chosen = options[0][0]

    print(f"  -> Using {chosen}")
    return chosen


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Manage simulation lifecycle."""
    global _simulation, _database, _sim_task, _llm_provider, _llm_call_probability

    # Start up
    logger.info("Starting Soci API server...")

    # Choose provider, then health-check it; fall back to next available if quota/error
    _llm_provider = _choose_provider()
    llm = create_llm_client(provider=_llm_provider)
    logger.info(f"LLM provider: {_llm_provider} ({llm.__class__.__name__})")

    # Quick probe - if the chosen provider is already quota-exhausted or broken,
    # fall back through NN -> Groq -> Gemini -> Ollama so the sim starts with a working LLM.
    _fallback_order = [PROVIDER_NN, PROVIDER_GROQ, PROVIDER_GEMINI, PROVIDER_OLLAMA]
    probe = await llm.complete("You are a test.", "Reply: ok", max_tokens=8)
    if not probe:
        last_err = getattr(llm, "_last_error", "") or getattr(llm, "_auth_error", "")
        logger.warning(f"Provider '{_llm_provider}' failed probe ({last_err}) β€” trying fallbacks")
        # Reset circuit breaker β€” a failed probe shouldn't block the whole day;
        # the simulation loop will handle rate limits gracefully per-tick.
        if hasattr(llm, "_rate_limited_until"):
            llm._rate_limited_until = 0.0
        for fallback in _fallback_order:
            if fallback == _llm_provider:
                continue
            try:
                candidate = create_llm_client(provider=fallback)
                test = await candidate.complete("You are a test.", "Reply: ok", max_tokens=8)
                if test:
                    llm = candidate
                    _llm_provider = fallback
                    logger.info(f"Fell back to provider '{_llm_provider}'")
                    break
            except Exception:
                continue
        else:
            logger.warning("All provider fallbacks failed β€” simulation will run in routine-only mode")
            # Reset circuit breaker on the original provider so it can retry during simulation
            if hasattr(llm, "_rate_limited_until"):
                llm._rate_limited_until = 0.0

    # Default LLM call probability per provider.
    # Cloud providers default to 0.10 (10%) to conserve daily quotas.
    # NN and Ollama are local, so they default to 1.0 (100%).
    # Override via SOCI_LLM_PROB env var or the UI slider.
    _provider_default_prob = {
        PROVIDER_NN: 1.0,       # NN is free/local - no rate limiting needed
        PROVIDER_GEMINI: 0.10,
        PROVIDER_GROQ: 0.10,
        PROVIDER_CLAUDE: 0.10,
        PROVIDER_OLLAMA: 1.0,
    }
    env_prob = os.environ.get("SOCI_LLM_PROB")

    db = Database()
    await db.connect()
    _database = db

    if env_prob is not None:
        # Env var always wins
        _llm_call_probability = float(env_prob)
    else:
        # Always start with provider default - the DB-saved slider value from a
        # previous session may have been tuned for a different provider or context.
        # Users can adjust via the UI slider during the session.
        _llm_call_probability = _provider_default_prob.get(_llm_provider, 0.10)
    logger.info(f"LLM call probability: {_llm_call_probability:.0%}")

    # Pull saved state from GitHub before trying to load locally
    data_dir = Path(os.environ.get("SOCI_DATA_DIR", "data"))
    await load_state_from_github(data_dir)

    # Try to resume - any failure falls back to a fresh simulation
    sim = None
    try:
        sim = await load_simulation(db, llm)
    except Exception as e:
        logger.warning(f"Failed to load saved simulation, starting fresh: {e}")

    if sim is None:
        # Create new
        config_dir = Path(__file__).parents[3] / "config"
        city = City.from_yaml(str(config_dir / "city.yaml"))
        clock = SimClock(tick_minutes=15, hour=6, minute=0)
        sim = Simulation(city=city, clock=clock, llm=llm)
        sim.load_agents_from_yaml(str(config_dir / "personas.yaml"))
        # Scale to target agent count with procedural generation
        target_agents = int(os.environ.get("SOCI_AGENTS", "50"))
        if len(sim.agents) < target_agents:
            sim.generate_agents(target_agents - len(sim.agents))
        logger.info(f"Created new simulation with {len(sim.agents)} agents")

    _simulation = sim

    # Start background simulation
    # SOCI_TICK_DELAY: seconds to sleep between ticks (default 0.5).
    # Set to 0 to let LLM latency pace the simulation naturally.
    tick_delay = float(os.environ.get("SOCI_TICK_DELAY", "0.5"))
    _sim_task = asyncio.create_task(simulation_loop(sim, db, tick_delay=tick_delay))

    yield

    # Shut down
    if _sim_task:
        _sim_task.cancel()
        try:
            await _sim_task
        except asyncio.CancelledError:
            pass
    await save_simulation(sim, db, "shutdown_save")
    # Push state to GitHub so it survives the next redeploy
    await save_state_to_github(data_dir)
    await db.close()
    logger.info("Soci API server stopped.")


def create_app() -> FastAPI:
    """Create the FastAPI application."""
    app = FastAPI(
        title="Soci β€” City Population Simulator",
        description="API for the LLM-powered city population simulation",
        version="0.2.0",
        lifespan=lifespan,
    )

    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    app.include_router(router, prefix="/api")
    app.include_router(ws_router)

    # Serve web UI
    web_dir = Path(__file__).parents[3] / "web"
    if web_dir.exists():
        @app.get("/")
        async def serve_index():
            return FileResponse(web_dir / "index.html")

        app.mount("/static", StaticFiles(directory=str(web_dir)), name="static")

    return app


app = create_app()
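

# Minimal local-run sketch (an assumption, not the project's documented entrypoint):
# uvicorn is the usual ASGI server for FastAPI apps; the host and the PORT fallback
# below are illustrative and can be overridden however the deployment normally does it.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("PORT", "8000")))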