diff --git a/.env.example b/.env.example new file mode 100644 index 0000000000000000000000000000000000000000..7d506828db4661f52cc6cd497d0660dba0c57396 --- /dev/null +++ b/.env.example @@ -0,0 +1,34 @@ +# Riprap environment configuration. +# +# Copy this file to `.env` and fill in the values that match the +# inference backend you want to talk to. The default profile runs +# only the app container, so both the LLM (vLLM serving Granite 4.1) +# and the ML specialist service must be reachable at HTTP endpoints. +# +# Three common configurations: +# +# 1. Easiest — talk to the live demo's backends. Adam runs a public +# MI300X droplet for the hackathon; if it's still up at demo time, +# both endpoints are reachable from anywhere. +# +# 2. Self-hosted — bring up your own MI300X droplet via +# docs/DROPLET-RUNBOOK.md, then point both URLs at it. +# +# 3. Full local — use `docker compose --profile with-models up` to +# run the riprap-models service yourself (requires a GPU on your +# box) and point a separate vLLM container at Granite 4.1. + +# ---- Granite 4.1 reconciler (vLLM, OpenAI-compatible) ----------------- +# Set to "ollama" instead of "vllm" if you have a local Ollama with +# granite4.1:8b pulled and want to use that. +RIPRAP_LLM_PRIMARY=vllm +RIPRAP_LLM_BASE_URL=http://your-vllm-host:8000/v1 +RIPRAP_LLM_API_KEY=your-token-here + +# ---- ML specialist service (Prithvi, TerraMind, GLiNER, etc.) --------- +RIPRAP_ML_BASE_URL=http://your-ml-host:7860 +RIPRAP_ML_API_KEY=your-token-here + +# ---- Backend pill labels (cosmetic, shown top-right of the UI) -------- +RIPRAP_HARDWARE_LABEL=AMD MI300X +RIPRAP_ENGINE_LABEL=Granite 4.1 / vLLM diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..01616dee8aaa74679ecbb3734ec2bf8286b0779d --- /dev/null +++ b/.gitattributes @@ -0,0 +1,54 @@ +# Riprap-specific LFS tracking +*.geojson filter=lfs diff=lfs merge=lfs -text +*.tif filter=lfs diff=lfs merge=lfs -text +*.pdf filter=lfs diff=lfs merge=lfs -text +# Pre-computed register paragraphs +data/registers/*.json filter=lfs diff=lfs merge=lfs -text +# Esri FileGDB internal binary files (DEP Stormwater scenario data) +*.gdbtable filter=lfs diff=lfs merge=lfs -text +*.gdbtablx filter=lfs diff=lfs merge=lfs -text +*.gdbindexes filter=lfs diff=lfs merge=lfs -text +*.atx filter=lfs diff=lfs merge=lfs -text +*.spx filter=lfs diff=lfs merge=lfs -text +*.freelist filter=lfs diff=lfs merge=lfs -text +*.horizon filter=lfs diff=lfs merge=lfs -text +*.FDO_UUID filter=lfs diff=lfs merge=lfs -text +# Hugging Face's standard LFS rules (kept for forward-compat with model assets) +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs 
diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +*.pptx filter=lfs diff=lfs merge=lfs -text +assets/screenshots/** filter=lfs diff=lfs merge=lfs -text +slides/*.png filter=lfs diff=lfs merge=lfs -text diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000000000000000000000000000000000000..38a9e322f2bb7b6ac27f7359f1cbc80a4f349c70 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,65 @@ +name: Bug report +description: A briefing came back wrong, a Stone failed to fire, or the UI broke. +title: "[bug] " +labels: ["bug"] +body: + - type: markdown + attributes: + value: | + Thanks for filing! Riprap is a hackathon-period demo; the more + reproducible the report, the faster it gets fixed. + - type: input + id: address + attributes: + label: NYC address tested + description: The exact string you typed (or "n/a" if the bug is UI-only). + placeholder: 80 Pioneer Street, Brooklyn + validations: + required: true + - type: textarea + id: expected + attributes: + label: Expected behavior + validations: + required: true + - type: textarea + id: actual + attributes: + label: Actual behavior + description: Paste the briefing text or describe the failure. + validations: + required: true + - type: dropdown + id: surface + attributes: + label: Where did you reproduce this? + options: + - Hosted demo (lablab Space) + - Local Docker (`docker compose up`) + - Local dev server (`uvicorn web.main:app`) + - Self-hosted GPU inference + validations: + required: true + - type: input + id: browser + attributes: + label: Browser / OS + placeholder: Chrome 142 on macOS 14 + - type: textarea + id: console + attributes: + label: Browser console errors + description: DevTools → Console. Paste anything red. + render: text + - type: textarea + id: stream + attributes: + label: /api/agent/stream output (optional) + description: | + If the bug is a Stone failure, paste the relevant lines from the + SSE trace pane (or curl `/api/agent/stream?q=
` directly). + render: text + - type: textarea + id: notes + attributes: + label: Anything else diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..40b8db9a2e1d68561769b4dea4490530422871cd --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: false +contact_links: + - name: Try the live demo + url: https://lablab-ai-amd-developer-hackathon-riprap-nyc.hf.space + about: Reproduce the issue against the hosted Space before filing. + - name: Read the architecture docs + url: https://github.com/msradam/riprap-nyc/tree/main/docs + about: ARCHITECTURE, METHODOLOGY, EMISSIONS, DEPLOY, BENCHMARKS, RESEARCH. diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 0000000000000000000000000000000000000000..6fe4f7e79ee586c376fcd21fcb756d790955cc7b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,54 @@ +name: Feature request +description: Propose a new probe, a new Stone, or a new civic-tech use case. +title: "[feat] " +labels: ["enhancement"] +body: + - type: textarea + id: usecase + attributes: + label: Civic-tech use case + description: | + Who is the user, what decision are they making, and what + evidence would Riprap need to surface to support it? + placeholder: | + e.g. "A resilience office siting a capital project needs the + joint exposure of NYCHA + schools within 200m of a Sandy + 100-year inundation polygon." + validations: + required: true + - type: textarea + id: data + attributes: + label: Data source(s) + description: | + Which public-record datasets should Riprap pull from? Include + URLs, agency owner, refresh cadence, and licence if known. + validations: + required: true + - type: dropdown + id: stone + attributes: + label: Which Stone does this belong in? + options: + - Cornerstone (hazard memory) + - Keystone (asset registers) + - Touchstone (live observation) + - Lodestone (forecast) + - Capstone (synthesis) + - Not sure / cross-cutting + validations: + required: true + - type: dropdown + id: contribute + attributes: + label: Willing to contribute the implementation? + options: + - "Yes — I can open the PR" + - "Maybe — with mentorship" + - "No — flagging the gap" + validations: + required: true + - type: textarea + id: notes + attributes: + label: Anything else diff --git a/.github/ISSUE_TEMPLATE/port_to_new_city.yml b/.github/ISSUE_TEMPLATE/port_to_new_city.yml new file mode 100644 index 0000000000000000000000000000000000000000..a1c8a6a336d03c82e393d8d3ed9835546fec127b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/port_to_new_city.yml @@ -0,0 +1,70 @@ +name: Port to a new city +description: Plan a Riprap deployment for a city other than NYC. +title: "[port] " +labels: ["port", "enhancement"] +body: + - type: markdown + attributes: + value: | + Riprap's Five Stones taxonomy is city-agnostic; only the probes + plugged into each Stone change. See the "Five Stones beyond NYC" + section in the README. This template helps scope a port. + - type: input + id: city + attributes: + label: Target city / region + placeholder: e.g. Houston, TX + validations: + required: true + - type: textarea + id: cornerstone + attributes: + label: Cornerstone — hazard memory + description: | + Local historical inundation extents, regional DEM, regulatory + floodplain maps. Include dataset URLs and licences. 
+ validations: + required: true + - type: textarea + id: keystone + attributes: + label: Keystone — asset registers + description: | + Transit, housing, education, healthcare polygons your jurisdiction + publishes. + validations: + required: true + - type: textarea + id: touchstone + attributes: + label: Touchstone — live observation + description: | + Live sensors, complaint streams (e.g. Houston has FloodNet + analogues; many cities expose 311 or equivalent). + validations: + required: true + - type: textarea + id: lodestone + attributes: + label: Lodestone — forecast + description: | + Local NWS / hydrologic / surge models, tide gauges, time-series + fine-tunes you'd retrain. + validations: + required: true + - type: dropdown + id: hardware + attributes: + label: Target inference hardware + options: + - AMD MI300X (or other ROCm) + - NVIDIA L4 / A10 + - NVIDIA H100 / A100 + - CPU-only (Ollama) + - Not decided + validations: + required: true + - type: textarea + id: notes + attributes: + label: Anything else diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000000000000000000000000000000000..bc5223aadbed1384535ab54e8e54414b3217d0d8 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,38 @@ + + +## Summary + + + +## Tested against + +- [ ] Local dev server (`uvicorn web.main:app`) +- [ ] Local Docker (`docker compose up`) +- [ ] Hosted lablab Space +- [ ] Self-hosted GPU inference + +## Stones-fire probe + + + +``` +PYTHONPATH=. uv run python scripts/probe_stones_fire.py --timeout 600 +``` + +## Energy-ledger sanity check + + + +## Checklist + +- [ ] No regression in `app/`, `web/`, `services/`, or + `inference-vllm/proxy.py` logic (typo-only edits OK). +- [ ] Docs updated (`README.md`, relevant `docs/*.md`) if public + surface changed. +- [ ] `CHANGELOG.md` entry under `[Unreleased]` with the right + `Added` / `Changed` / `Fixed` bucket. +- [ ] Conventional-commit prefix on the squash title + (`feat:` / `fix:` / `docs:` / `chore:` / `build:`). diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml new file mode 100644 index 0000000000000000000000000000000000000000..307db1d846f0758e5c1a527a7d77fe2eac67de75 --- /dev/null +++ b/.github/workflows/check.yml @@ -0,0 +1,43 @@ +name: check + +on: + push: + branches: [main] + pull_request: + branches: [main] + +permissions: + contents: read + +jobs: + check: + name: import + lightweight tests + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v3 + with: + enable-cache: true + + - name: Set up Python 3.12 + run: uv python install 3.12 + + - name: Create venv and install deps + run: | + uv venv --python 3.12 + uv pip install -r requirements.txt + + - name: Import smoke test + env: + PYTHONPATH: . + run: | + uv run python -c "from app import fsm, llm, inference, emissions; from web import main" + + - name: Lightweight pytest subset + env: + PYTHONPATH: . 
+        run: |
+          uv run pytest -q tests/test_stones.py tests/test_compare_shape.py tests/test_stone_envelope.py
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..59d8bd63bfc7d2481a1b0ef5f29f1aa278791185
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,89 @@
+# Session artifacts (legacy agent reports — not for the public repo)
+*MORNING-BRIEF*.md
+*OVERNIGHT*.md
+*COMMS-OVERNIGHT*.md
+CODE-MORNING-BRIEF*.md
+MONDAY.md
+FRIDAY*.md
+*-REPORT.md
+docs/sessions/
+docs/design_handoff/
+
+# Local-only secrets / credentials
+AMD_TOKEN
+
+# Probe / batch / diagnostic output (regenerable; not for the repo)
+tests/batch_results.json
+tests/overnight_audit.json
+scripts/diagnostic_*.py
+scripts/find_top_locations.py
+scripts/verify_locations.py
+
+__pycache__/
+*.py[cod]
+*.egg-info/
+dist/
+build/
+.venv/
+.env
+.DS_Store
+outputs/
+node_modules/
+*.tmp
+*.log
+.ruff_cache/
+.pytest_cache/
+.ipynb_checkpoints/
+
+# Claude Code context (per-machine, not for the public repo)
+CLAUDE.md
+CLAUDE.local.md
+.claude/
+
+# legacy / intermediate Prithvi artifacts (not shipped)
+data/hls_stack_*.tif
+data/prithvi_runs/
+data/*.legacy_*
+web/svelte/node_modules/
+web/sveltekit/node_modules/
+web/sveltekit/.svelte-kit/
+# web/sveltekit/build/ stays tracked (ignore rule kept commented out) so the committed build deploys to the HF Space
+# web/sveltekit/build/
+
+# Experiments — cached HF model downloads, training artifacts, intermediate
+# fixtures. RESULTS.md, NOTES.md, and source code stay tracked.
+experiments/**/.cache/
+experiments/**/restore/
+experiments/**/publish/
+experiments/**/*.tif
+experiments/**/*.png
+experiments/**/*.jpg
+experiments/**/*.parquet
+experiments/**/*.npy
+pitch/screenshots-*/
+
+# Marp deck render artifacts (regenerable via `make` in slides/)
+slides/deck.pdf
+slides/deck.html
+slides/deck.pptx
+
+# Deploy / editor scratch artifacts
+/tmp/riprap-*
+.deploy-state
+*.bak
+*.swp
+*.swo
+.playwright-mcp/
+
+# Demo recordings (large; not committed)
+assets/video/
+slides/*.mp4
+slides/asce/speaker_notes.md
+
+# Local env overlays
+.env.local
+*.local.env
+
+# Sensitive
+# (AMD_TOKEN is already ignored under local-only secrets above)
+submission.md
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..16e678b932d183271a9252dd5597ab7431e1dd4d
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,96 @@
+# Changelog
+
+All notable changes to Riprap. The hackathon submission tag is
+`v0.5.0` (build 2026-05-07); subsequent dates record polish work
+that landed on the hackathon-period production deploys.
+
+## [Unreleased] — 2026-05-09 (Saturday)
+
+### Added
+- **Per-query inference energy ledger** with real NVML readings off
+  the L4 GPU. The status row on the Findings region now reports
+  total Wh + total tokens for every briefing, with a leading icon
+  (`✓` / `◐` / `~`) disclosing whether the number was measured or
+  estimated. Full breakdown documented in
+  [`docs/EMISSIONS.md`](docs/EMISSIONS.md).
+- `inference-vllm/proxy.py`: 100 ms-cadence NVML sampler, response
+  headers `X-GPU-Power-W` / `X-GPU-Energy-J` on every forwarded
+  POST, and a `GET /v1/power` endpoint for bracket-sampling clients.
+- `app/emissions.py` — new module with a thread-local `Tracker` that
+  records every LLM and ML inference call (model, hardware, tokens,
+  duration, joules) with a `measured: bool` flag per row.
+- `scripts/probe_stones_fire.py` — programmatic CI that runs an + address query against the lablab UI and asserts all five Stones + fire, no `torchvision::nms` / `deps unavailable` dep regression, + and the `emissions` block carries `nvidia_l4` hardware. +- `scripts/probe_benchmarks.py` — collects the canonical + four-address verification set into `outputs/benchmarks.json` + for the `docs/BENCHMARKS.md` page. +- `docs/EMISSIONS.md`, `docs/DEPLOY.md`, `docs/BENCHMARKS.md`, + `CHANGELOG.md`, `CONTRIBUTING.md`. + +### Changed +- The `RunHealthStrip` chip dropped the cloud-energy comparison + (the sign convention was misleading and the comparison is now + redundant given real measurements). New format: + ` X.X Wh / Y.YK tok inference`. +- `app/llm.py:_default_hardware_label` defaults to `"NVIDIA L4"` + when remote vLLM is configured (was `"AMD MI300X"`, a stale + string from the droplet days). +- `app/llm.py:chat()` now brackets every completion with two GETs + to the inference Space's `/v1/power` endpoint; the average powers + the LLM-call energy reading instead of the data-sheet estimate. +- `app/inference.py:_post()` reads NVML headers off the proxy + response and forwards real joules into `emissions.record_ml`. + +### Fixed +- `app/flood_layers/prithvi_live.py`: when the configured remote + inference call fails (`RemoteUnreachable`), the specialist no + longer falls through to the local terratorch path. The local + path crashes with `RuntimeError: operator torchvision::nms does + not exist` on the cpu-basic UI Space; surfacing a clean + `remote prithvi-pluvial unreachable` skip is correct. +- `app/context/terramind_nyc.py:_try_remote()`: returns a + `{"ok": False, "skipped": "remote terramind/: ..."}` + sentinel on remote failure, instead of `None` which was + silently masked as `deps unavailable on this deployment`. +- `web/main.py`: explicit `/favicon.svg`, `/favicon.png`, + `/favicon.ico`, `/robots.txt` routes — they were 404-ing under + the SvelteKit SPA fallback because only `/_app` was mounted off + the build directory. + +### Documentation +- Full README rewrite reflecting the post-droplet L4 topology, the + new emissions feature, and updated repo structure. Hackathon + framing preserved. +- New `docs/DEPLOY.md` with the production topology, env-var + reference, and per-Space deploy commands. +- New `docs/EMISSIONS.md` documenting what's measured vs. estimated, + the NVML pipeline, and how to verify. + +### Infrastructure note +- The DigitalOcean MI300X droplet was decommissioned 2026-05-06. + All production inference now serves from `msradam/riprap-vllm` + (NVIDIA L4). The MI300X runbook is preserved in + [`docs/DROPLET-RUNBOOK.md`](docs/DROPLET-RUNBOOK.md) for anyone + reproducing the AMD-judging setup; setting + `RIPRAP_HARDWARE_LABEL=AMD MI300X` swaps the emissions profile + back when redeploying to that hardware. + +--- + +## [v0.5.0] — 2026-05-07 + +Hackathon submission tag. 
+ +### Added +- Five-Stone Burr FSM with Granite-native document-role messages +- Mellea four-check rejection sampling for the Capstone +- SvelteKit UI with SSE streaming, briefing prose, evidence-card + grid, MapLibre overlay, citation drawer +- Three NYC-specialised foundation models published Apache-2.0: + `msradam/TerraMind-NYC-Adapters` (LULC + Buildings + TiM LoRAs), + `msradam/Prithvi-EO-2.0-NYC-Pluvial`, + `msradam/Granite-TTM-r2-Battery-Surge` +- 30+ FSM specialists across hazard memory, asset registers, live + observation, forecasting, and citation-grounded synthesis diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..4a35af05d3a5a47b49780d6739e0cc7563551c4b --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,85 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at msrahmanadam@gmail.com. All complaints will be reviewed and investigated promptly and fairly. 
+ +All community leaders are obligated to respect the privacy and security of the reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of actions. + +**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..7f181da6371fe2bbeaff72b03c4befaca83b77c1 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,127 @@ +# Contributing + +Riprap is the hackathon submission for the AMD × lablab.ai +Developer Hackathon, but the source ships under Apache 2.0 and is +intended to be reusable as a template for citation-grounded civic +AI in any flood-vulnerable region. Pull requests welcome. 
+
+## Quickstart
+
+Python 3.12 + `uv`:
+
+```bash
+git clone https://github.com/msradam/riprap-nyc
+cd riprap-nyc
+uv venv && uv pip install -r requirements.txt
+```
+
+SvelteKit (the build is committed; only rebuild when sources
+change under `web/sveltekit/src`):
+
+```bash
+cd web/sveltekit && npm ci && npm run build && cd ../..
+```
+
+Run the dev server locally pointing at the production inference
+Space (real Granite + EO models, real NVML energy readings):
+
+```bash
+RIPRAP_LLM_PRIMARY=vllm \
+RIPRAP_LLM_BASE_URL=https://msradam-riprap-vllm.hf.space/v1 \
+RIPRAP_LLM_API_KEY= \
+RIPRAP_ML_BACKEND=remote \
+RIPRAP_ML_BASE_URL=https://msradam-riprap-vllm.hf.space \
+RIPRAP_ML_API_KEY= \
+.venv/bin/uvicorn web.main:app --host 127.0.0.1 --port 7860
+```
+
+Or run fully local with Ollama (no GPU readings; data-sheet
+estimate). Note `ollama pull` takes one model per invocation:
+
+```bash
+ollama pull granite4.1:3b && ollama pull granite4.1:8b
+.venv/bin/uvicorn web.main:app --host 127.0.0.1 --port 7860
+```
+
+## Verifying changes
+
+Two probe scripts exercise the live deployment end-to-end:
+
+```bash
+# All five Stones must fire on the canonical address; emissions
+# block must carry nvidia_l4 hardware; no torchvision/terratorch
+# dep regressions in the trace.
+PYTHONPATH=. uv run python scripts/probe_stones_fire.py --timeout 600
+
+# Full canonical suite — five NYC addresses, intent-aware checks,
+# Mellea grounding budget, no specialist crashes.
+.venv/bin/python scripts/probe_addresses.py \
+  --base https://lablab-ai-amd-developer-hackathon-riprap-nyc.hf.space
+```
+
+Both default to the lablab UI Space; pass `--base http://127.0.0.1:7860`
+to hit a local server.
+
+## Structure
+
+```
+app/                 Python package — the FSM and its specialists
+├── fsm.py           Burr FSM, one @action per probe
+├── llm.py           LiteLLM Router shim (Ollama / vLLM)
+├── inference.py     HTTP client for the riprap-models service
+├── emissions.py     Per-query energy + token tracker
+├── stones/          Stone taxonomy (NAME / TAGLINE / collect())
+├── flood_layers/    Cornerstone probes (sandy, dep, microtopo, …)
+├── context/         Keystone + Touchstone register + EO probes
+├── live/            Lodestone forecast probes
+├── intents/         single_address / neighborhood / compare / live_now
+├── reconcile.py     Capstone — Granite-native document reconcile
+└── mellea_validator.py  Mellea four-check rejection sampling
+
+web/                 FastAPI + SvelteKit
+├── main.py          FastAPI app, SSE streaming, layer endpoints
+├── sveltekit/       Primary UI (adapter-static; build committed)
+└── static/          Legacy custom-element pages (still mounted)
+
+inference-vllm/      Inference Space source (vLLM + EO models + proxy)
+├── Dockerfile       L4 image, bakes Granite 4.1 8B FP8 + EO deps
+├── entrypoint.sh    Boots vllm, riprap-models, proxy as subprocesses
+└── proxy.py         Bearer-auth + NVML power sampler + SSE pass-through
+
+inference/           Ollama-backed inference Space (fallback variant)
+services/riprap-models/  The EO/forecast specialist HTTP service
+
+scripts/
+├── probe_stones_fire.py       Programmatic Stone-fire CI
+├── probe_addresses.py         Canonical 5-address suite
+├── deploy_vllm_space.sh       Deploy the L4 inference Space
+├── deploy_personal_space.sh   Deploy the personal L4 mirror
+├── deploy_inference_space.sh  Deploy the Ollama-backed inference Space
+└── …                          Register builders, raster bakers, etc.
+
+experiments/         Reproduction recipes for the three NYC fine-tunes
+docs/                Architecture, methodology, deploy, emissions, runbooks
+tests/               pytest suite (envelope + compare-shape tests)
+```
+
+## Style
+
+- Python 3.12; `uv` for package management.
+- LLM calls go through `app/llm.py` — never import `litellm` / + `ollama` directly from a specialist. The `chat()` shim wraps both + backends and the energy ledger reads off it. +- Remote ML calls go through `app/inference.py::_post`. Specialists + may try local fallback only when `inference.remote_enabled()` is + False; once a remote call has been attempted, return a clean + `{ok: False, skipped: ...}` on failure rather than crashing + through to local code paths that may not be installed. +- Every specialist emits one trace record per call with `step` / + `ok` / `elapsed_s` / `result` / `err` so the SSE stream and the + emissions tracker can reason about it. + +## Reporting issues + +GitHub issues at . +For hackathon-period demo issues during May 4–10 2026, the live +deploy at + +is the source of truth. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..38fdadb9455dc0243f0a6a0003889e1ddb6406b0 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,104 @@ +# Riprap — Hugging Face Spaces deployment for the personal Space +# (msradam/riprap-nyc) on L4 hardware. +# +# Differences from the canonical Dockerfile: +# +# 1. L4 has 24 GB VRAM (vs 16 GB on T4 small), so we co-host the +# riprap-models service inside the same container instead of +# proxying to the AMD MI300X droplet. No external dependency. +# +# 2. We bake granite4.1:8b at *build* time. The build sandbox could +# not previously fit Granite + EO toolchain together; this Dockerfile +# keeps the EO install at runtime (entrypoint.l4.sh) and frees the +# sandbox budget for the 8B pull. +# +# 3. CUDA + ROCm-free torch — the inline riprap-models service uses +# the cu124 wheels installed via requirements.txt + the additional +# delta in services/riprap-models/requirements.txt. +# +# DO NOT push this image to the lablab Space — that one stays pointed +# at the MI300X droplet for AMD-judging continuity. + +FROM nvidia/cuda:12.4.1-cudnn-runtime-ubuntu22.04 AS base + +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install -y --no-install-recommends \ + python3 python3-pip python3-venv python-is-python3 \ + curl ca-certificates zstd procps git \ + gdal-bin libgdal-dev libgeos-dev libproj-dev \ + libgl1 libglib2.0-0 \ + && rm -rf /var/lib/apt/lists/* + +RUN useradd -m -u 1000 user +ENV HOME=/home/user \ + PATH=/home/user/.local/bin:/usr/local/bin:/usr/bin:/bin \ + PYTHONUNBUFFERED=1 \ + HF_HOME=/home/user/.cache/huggingface \ + OLLAMA_HOST=127.0.0.1:11434 \ + OLLAMA_NUM_PARALLEL=1 \ + OLLAMA_KEEP_ALIVE=24h \ + OLLAMA_MAX_LOADED_MODELS=2 \ + OLLAMA_FLASH_ATTENTION=1 \ + OLLAMA_KV_CACHE_TYPE=q8_0 \ + OLLAMA_DEBUG=1 \ + OLLAMA_MODELS=/home/user/.ollama/models \ + RIPRAP_OLLAMA_3B_TAG=granite4.1:8b \ + RIPRAP_LLM_PRIMARY=ollama \ + RIPRAP_LLM_BASE_URL=http://127.0.0.1:11434/v1 \ + RIPRAP_ML_BACKEND=remote \ + RIPRAP_ML_BASE_URL=http://127.0.0.1:7861 + +RUN curl -fsSL https://ollama.com/install.sh | sh + +WORKDIR /home/user/app + +# Web app deps (torch cu124 lands via sentence-transformers / etc.). +COPY --chown=user:user requirements.txt ./ +RUN pip install --no-cache-dir --upgrade pip && \ + pip install --no-cache-dir -r requirements.txt + +# riprap-models delta deps. Use the existing requirements.txt at the +# *service* level, but skip requirements-full.txt — its ROCm-frozen +# torch pin would clobber the cu124 wheels installed above. 
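+# (Hypothetical sanity check, not part of the build: inside the image,
+#   python -c "import torch; print(torch.version.cuda)"
+# should print 12.4 for the cu124 wheels; a ROCm-pinned torch would
+# print None there and set torch.version.hip instead.)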
+COPY --chown=user:user services/riprap-models/requirements.txt /tmp/req-models.txt +RUN pip install --no-cache-dir -r /tmp/req-models.txt + +# Bake torchvision (CUDA 12.4 wheel) and peft at build time. The +# canonical entrypoint.sh runtime-installs torchvision via the EO +# toolchain path because the canonical CPU Space's build sandbox is +# too tight; L4 builds have more room, and a properly matched +# torchvision avoids the `torchvision::nms does not exist` runtime +# error the canonical setup hits. peft is required by the riprap- +# models service for the TerraMind LoRA inference path. +RUN pip install --no-cache-dir \ + --index-url https://download.pytorch.org/whl/cu124 \ + torchvision \ + && pip install --no-cache-dir peft==0.18.1 + +# Bake Granite 4.1 weights into the image (EO toolchain is installed +# at runtime — see entrypoint.l4.sh — to keep the build sandbox under +# its disk threshold). +RUN mkdir -p $OLLAMA_MODELS && \ + ollama serve & \ + OPID=$! && \ + for i in $(seq 1 30); do curl -sf http://127.0.0.1:11434/ > /dev/null && break; sleep 1; done && \ + ollama pull granite4.1:8b && \ + kill $OPID 2>/dev/null || true && \ + sleep 2 + +# App code, fixtures, and inline model service. +COPY --chown=user:user app/ ./app/ +COPY --chown=user:user web/ ./web/ +COPY --chown=user:user scripts/ ./scripts/ +COPY --chown=user:user data/ ./data/ +COPY --chown=user:user corpus/ ./corpus/ +COPY --chown=user:user services/riprap-models/main.py ./riprap_models.py +COPY --chown=user:user agent.py riprap.py ./ +COPY --chown=user:user entrypoint.sh ./entrypoint.sh +RUN chmod +x ./entrypoint.sh + +RUN chown -R user:user /home/user +USER user + +EXPOSE 7860 +CMD ["./entrypoint.sh"] diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de1c92343e6433c35f9819f8186e4234eb01715f --- /dev/null +++ b/README.md @@ -0,0 +1,26 @@ +--- +title: Riprap NYC (Personal Mirror, L4) +emoji: 🌊 +colorFrom: blue +colorTo: indigo +sdk: docker +pinned: false +short_description: NYC flood-exposure briefings on L4 (self-contained). +--- + +# Riprap — NYC flood-exposure briefings (L4 self-contained mirror) + +This Space is a self-contained mirror of +[`github.com/msradam/riprap-nyc`](https://github.com/msradam/riprap-nyc). + +It runs on a single L4 GPU and co-hosts everything in one container: +Granite 4.1 8B (via Ollama), Prithvi-EO 2.0 NYC-Pluvial, TerraMind +LULC + Buildings LoRAs, and Granite TTM r2 — no external droplet +dependency. Sleeps on idle; first request after sleep takes ~45–60 s +to wake. + +The hackathon submission Space (CPU UI, droplet proxy) lives at +[`AMD-hackathon/riprap-nyc`](https://lablab-ai-amd-developer-hackathon-riprap-nyc.hf.space). + +Apache 2.0. See the GitHub repo for full source, architecture +deep-dive, methodology, and licence map. diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000000000000000000000000000000000..729ebf8d63a28325012a8c143338133b24cd5b07 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,54 @@ +# Security policy + +## Reporting a vulnerability + +If you find a security issue in Riprap, please report it privately so +it can be triaged before disclosure. + +- Email: **msrahmanadam@gmail.com** (subject prefix: `[riprap-security]`) +- Or open a [GitHub Security Advisory](https://github.com/msradam/riprap-nyc/security/advisories/new) + on this repository. + +Please do not file a public GitHub issue for security reports. 
+ +We aim to acknowledge reports within 72 hours and to ship a fix or a +mitigation plan within two weeks of triage. If the report concerns a +vulnerability in an upstream model or service Riprap depends on +(IBM Granite, vLLM, Hugging Face Spaces, NYC Open Data endpoints), we +will help coordinate disclosure with the upstream maintainer. + +## Threat-surface notes + +Riprap is a citation-grounded synthesis layer over public-record +data. By design, the runtime: + +- contacts only **public-record APIs** (NYC Open Data, FloodNet, + USGS, NOAA, NWS, NYS DOH, MTA, NYCHA, NYC DOE, OpenStreetMap / + Nominatim) and the configured inference Spaces; +- does **not** authenticate against user accounts or store + user-identifying data — the address bar is the only input; +- runs the SvelteKit UI as a static SPA over a FastAPI backend + with no persistent database. + +The vulnerability surface is therefore small. Plausible categories +worth a report: + +- Prompt-injection paths via document content that escape the + Mellea grounding loop and surface unverifiable claims as cited. +- SSRF / abuse via crafted address strings that drive backend + HTTP calls to unintended hosts. +- Token leakage in proxy headers or SSE streams + (`inference-vllm/proxy.py`, `web/main.py`). +- Denial-of-service patterns that exceed the hosted Space's + resource budget. +- Supply-chain issues in pinned deps (`requirements.txt`, + `web/sveltekit/package.json`). + +## Out of scope + +- Self-hosted deployments running with custom configuration or + custom datasets — please file those as regular bugs. +- Findings that require physical or local-network access to a + user's machine. +- Issues in the lablab.ai or Hugging Face Spaces hosting platforms + themselves; please report those upstream. diff --git a/agent.py b/agent.py new file mode 100644 index 0000000000000000000000000000000000000000..3f88417bb5715fbe3654daf813e4d2fb8396ae8c --- /dev/null +++ b/agent.py @@ -0,0 +1,52 @@ +"""Riprap agent CLI — address → cited briefing via the Burr FSM. + +Usage: + python agent.py "180 Beach 35 St, Queens" + python agent.py "280 Broome St, Manhattan" --json +""" +from __future__ import annotations + +import argparse +import json +import sys +import warnings + +warnings.filterwarnings("ignore") + +from app.fsm import run # noqa: E402 + + +def main() -> int: + ap = argparse.ArgumentParser() + ap.add_argument("query", help="NYC address or natural-language location") + ap.add_argument("--json", action="store_true", help="emit full JSON state") + args = ap.parse_args() + + print(f"\n query: {args.query}", file=sys.stderr) + print(" running FSM... 
(Granite 4.1 + open data, all local)\n", file=sys.stderr) + + result = run(args.query) + + if args.json: + print(json.dumps(result, indent=2, default=str)) + return 0 + + print("─── trace " + "─" * 56) + for step in result["trace"]: + ok = "✓" if step["ok"] else "✗" + line = f" {ok} {step['step']:22s} {step.get('elapsed_s', 0):>5.2f}s" + if step.get("result"): + line += " " + json.dumps(step["result"], default=str) + elif step.get("err"): + line += " ERR: " + step["err"] + print(line) + + print("\n─── cited report " + "─" * 49) + print() + print(result["paragraph"]) + print() + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/app/__init__.py b/app/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/app/areas/__init__.py b/app/areas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/app/areas/nta.py b/app/areas/nta.py new file mode 100644 index 0000000000000000000000000000000000000000..631fce4fbc00754b18cc6e8e60266fde7be89830 --- /dev/null +++ b/app/areas/nta.py @@ -0,0 +1,224 @@ +"""NYC Neighborhood Tabulation Area (NTA 2020) resolver. + +NTAs are NYC Department of City Planning's official neighborhood unit: +~262 polygons covering all 5 boroughs, including some park / airport +slivers. They are the canonical "neighborhood" unit for NYC civic data. + +This module provides: + - load() → GeoDataFrame with all NTAs (cached) + - resolve(name) → list of matching NTAs by fuzzy name match, or by borough + - by_code(code) → exact lookup + - polygon_for(code) → shapely Polygon in EPSG:4326 +""" +from __future__ import annotations + +import re +from functools import lru_cache +from pathlib import Path +from typing import Any + +import geopandas as gpd +from shapely.geometry import Polygon + +DATA_PATH = Path(__file__).resolve().parents[2] / "data" / "nyc_ntas_2020.geojson" + +# Common alias map: user-typed strings → canonical NTA names. We don't need to +# be exhaustive here; the fuzzy matcher catches most cases. This handles the +# few hard ones where the official NTA name differs from local usage. 
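+# Illustrative lookups (hypothetical REPL session, not a doctest —
+# exact results depend on the shipped GeoJSON):
+#   resolve("les")         → Lower East Side              (alias hit)
+#   resolve("bk")          → every Brooklyn NTA           (borough hit)
+#   resolve("Kew Gardens") → Kew Gardens, not Kew Gardens Hills
+#                            (the exact-name pass outranks substring)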
+ALIASES = { + "the rockaways": "Rockaway Beach-Arverne-Edgemere", + "rockaway": "Rockaway Beach-Arverne-Edgemere", + "brighton": "Brighton Beach", + "lower east side": "Lower East Side", + "les": "Lower East Side", + "soho": "SoHo-Little Italy-Hudson Square", + "tribeca": "Tribeca-Civic Center", + "fidi": "Financial District-Battery Park City", + "downtown brooklyn":"Downtown Brooklyn-DUMBO-Boerum Hill", + "dumbo": "Downtown Brooklyn-DUMBO-Boerum Hill", + "park slope": "Park Slope", + "carroll gardens": "Carroll Gardens-Cobble Hill-Gowanus-Red Hook", + "red hook": "Carroll Gardens-Cobble Hill-Gowanus-Red Hook", + "gowanus": "Carroll Gardens-Cobble Hill-Gowanus-Red Hook", + "hollis": "Queens Village-Hollis-Bellerose", + "long island city": "Hunters Point-Sunnyside-West Maspeth", + "lic": "Hunters Point-Sunnyside-West Maspeth", + "astoria": "Astoria (Central)", + "flushing": "Flushing-Willets Point", + "harlem": "Central Harlem (North)", + "east harlem": "East Harlem (North)", + "washington heights":"Washington Heights (North)", + "midtown": "Midtown South-Flatiron-Union Square", + "upper east side": "Upper East Side-Carnegie Hill", + "ues": "Upper East Side-Carnegie Hill", + "upper west side": "Upper West Side-Lincoln Square", + "uws": "Upper West Side-Lincoln Square", + "coney island": "Coney Island-Sea Gate", +} + +BOROUGH_NORMALIZE = { + "manhattan": "Manhattan", "mn": "Manhattan", + "brooklyn": "Brooklyn", "bk": "Brooklyn", "kings": "Brooklyn", + "queens": "Queens", "qn": "Queens", + "bronx": "Bronx", "the bronx": "Bronx", "bx": "Bronx", + "staten island": "Staten Island", "si": "Staten Island", "richmond": "Staten Island", +} + + +def _normalize(s: str) -> str: + return re.sub(r"[^a-z]+", "", (s or "").lower()) + + +@lru_cache(maxsize=1) +def load() -> gpd.GeoDataFrame: + """Load the NTA 2020 GeoJSON; coerce CRS to EPSG:4326. Cached.""" + g = gpd.read_file(DATA_PATH) + if g.crs is None or g.crs.to_string() != "EPSG:4326": + g = g.to_crs("EPSG:4326") + return g + + +def by_code(code: str) -> dict | None: + g = load() + hit = g[g["nta2020"] == code] + if hit.empty: + return None + return _row_to_dict(hit.iloc[0]) + + +def _row_to_dict(row) -> dict: + return { + "nta_code": row["nta2020"], + "nta_name": row["ntaname"], + "borough": row["boroname"], + "cdta": row.get("cdtaname"), + "geometry": row["geometry"], + } + + +def borough_match(query: str) -> str | None: + """If query matches a borough name (or common abbreviation), return the + canonical name. Otherwise return None.""" + q = query.strip().lower() + return BOROUGH_NORMALIZE.get(q) + + +def resolve(query: str) -> list[dict[str, Any]]: + """Resolve a free-text query to NTA(s). + + Strategy (in priority order): + 1. Borough match → all NTAs in borough. + 2. Alias map → exact NTA name match. + 3. Case-insensitive EXACT name match (so 'Kew Gardens' wins over + 'Kew Gardens Hills' when both exist). + 4. Substring match on normalized NTA name. When multiple match, + prefer the one whose normalized name length is closest to the + query — avoids 'Kew Gardens' resolving to 'Kew Gardens Hills'. + 5. CDTA-name substring fallback. 
+ """ + g = load() + q = (query or "").strip() + if not q: + return [] + boro = borough_match(q) + if boro: + hits = g[g["boroname"] == boro] + return [_row_to_dict(r) for _, r in hits.iterrows()] + + alias = ALIASES.get(q.lower()) + if alias: + hits = g[g["ntaname"] == alias] + if not hits.empty: + return [_row_to_dict(r) for _, r in hits.iterrows()] + + # Exact (case-insensitive) — preferred over substring + name_lower = g["ntaname"].fillna("").str.lower() + exact = g[name_lower == q.lower()] + if not exact.empty: + return [_row_to_dict(r) for _, r in exact.iterrows()] + + qn = _normalize(q) + if not qn: + return [] + name_norm = g["ntaname"].fillna("").map(_normalize) + contains = g[name_norm.str.contains(qn, na=False)].copy() + if not contains.empty: + contains["_diff"] = contains["ntaname"].fillna("").map( + lambda s: abs(len(_normalize(s)) - len(qn)) + ) + contains = contains.sort_values("_diff") + return [_row_to_dict(r) for _, r in contains.iterrows()] + + cdta_norm = g["cdtaname"].fillna("").map(_normalize) + contains = g[cdta_norm.str.contains(qn, na=False)] + if not contains.empty: + return [_row_to_dict(r) for _, r in contains.iterrows()] + + return [] + + +def polygon_for(code: str) -> Polygon | None: + hit = by_code(code) + return hit["geometry"] if hit else None + + +def resolve_from_text(text: str) -> list[dict[str, Any]]: # TODO(cleanup): cc-grade-D (25) + """Scan free-text (e.g. a full natural-language query) for any known NTA + name, alias, or borough. Returns the first match. This is the fallback + when the planner failed to extract a clean target. + + Strategy: walk ALIASES first (cheap), then iterate NTA names and look + for the longest match contained in the text. We prefer the longest + match so 'Carroll Gardens' wins over 'Gardens'. + """ + t = (text or "").lower() + if not t: + return [] + # Boroughs first (whole-word-ish — avoid false hits inside "queensland" etc.) + for boro_key, canon in BOROUGH_NORMALIZE.items(): + if f" {boro_key} " in f" {t} " or t.startswith(boro_key + " ") or t.endswith(" " + boro_key): + hits = resolve(canon) + if hits: + return hits + # Alias keys, longest first + for key in sorted(ALIASES.keys(), key=len, reverse=True): + if key in t: + hits = resolve(key) + if hits: + return hits + # NTA names. Order: longest first so multi-word names match before + # shorter substrings, AND preferring the WORD-BOUNDARY match so + # "Kew Gardens" in the query doesn't collide with "Kew Gardens Hills" + # (the latter is longer; without word-boundary checking it'd match + # nothing, but with substring-in-text it'd match if the query ever + # contained the longer phrase). Caller picks the closest-length match. + g = load() + names = sorted(set(g["ntaname"].dropna().str.lower().tolist()), key=len, reverse=True) + matches = [] + for name in names: + if not name or len(name) < 4: + continue + # Word-boundary-ish check: name must appear bounded by start/end or + # whitespace/punct (so "kew gardens hills" matches but "kew gardens" + # alone doesn't trigger "kew gardens hills" because of the trailing + # space requirement). + padded_t = f" {t} " + if f" {name} " in padded_t or f" {name}." in padded_t or f" {name}," in padded_t or f" {name}?" in padded_t: + matches.append(name) + if matches: + # Prefer the longest word-boundary match — most specific. + best = sorted(matches, key=len, reverse=True)[0] + hits = resolve(best) + if hits: + return hits + # Fallback: any substring (no boundary). 
Less precise, but catches + # casual queries like "show me red hook" where "red hook" is a + # neighborhood-name fragment within a longer NTA name. + for name in names: + if not name or len(name) < 4: + continue + if name in t: + hits = resolve(name) + if hits: + return hits + return [] diff --git a/app/assets/__init__.py b/app/assets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/app/assets/mta_entrances.py b/app/assets/mta_entrances.py new file mode 100644 index 0000000000000000000000000000000000000000..291c65c8f4f64c20625167a3aafa6e4990ff28ad --- /dev/null +++ b/app/assets/mta_entrances.py @@ -0,0 +1,73 @@ +"""MTA Subway Entrances and Exits (NY OpenData i9wp-a4ja). + +~1,900 subway entrances city-wide. The MTA Climate Resilience Roadmap +(Oct 2025) names ~1,500 of these as priorities for sealing — this is +exactly the asset class our RAG corpus has the most to say about, and +exactly the audience (MTA capital planners, transit advocacy) the +register is built for. +""" +from __future__ import annotations + +from pathlib import Path + +import geopandas as gpd +import httpx + +from app.spatial import DATA, NYC_CRS + +URL = "https://data.ny.gov/api/geospatial/i9wp-a4ja?method=export&format=GeoJSON" +LOCAL = DATA / "mta_entrances.geojson" + + +def _ensure_fixture() -> Path: + if LOCAL.exists(): + return LOCAL + print("downloading MTA Subway Entrances (one-time)...", flush=True) + r = httpx.get(URL, timeout=60) + r.raise_for_status() + LOCAL.write_text(r.text) + return LOCAL + + +def load() -> gpd.GeoDataFrame: + _ensure_fixture() + g = gpd.read_file(LOCAL) + if g.crs is None: + g.set_crs("EPSG:4326", inplace=True) + g = g.to_crs(NYC_CRS) + rename_map = { + "stop_name": "name", + "constrained_floor_to_floor_height": None, + "borough": "borough", + "entrance_type": "entrance_type", + "ada": "ada", + "north_south_street": "ns_street", + "east_west_street": "ew_street", + "corner": "corner", + } + for k, v in rename_map.items(): + if v and k in g.columns and k != v: + g = g.rename(columns={k: v}) + + # build a usable address-style label + def label(row): + nm = (row.get("name") or "").strip() + ns = (row.get("ns_street") or "").strip() + ew = (row.get("ew_street") or "").strip() + cn = (row.get("corner") or "").strip() + bits = [nm] + cross = " & ".join(b for b in [ns, ew] if b) + if cross: bits.append(cross) + if cn: bits.append(f"({cn})") + return ", ".join([b for b in bits if b]) + + g["address"] = g.apply(label, axis=1) + if "borough" in g.columns: + boro_map = {"M": "Manhattan", "Bk": "Brooklyn", "B": "Brooklyn", + "Q": "Queens", "Bx": "Bronx", "SI": "Staten Island"} + g["borough"] = g["borough"].astype(str).map(lambda v: boro_map.get(v, v.title())) + + keep = [c for c in ["name", "address", "borough", "entrance_type", + "ada", "ns_street", "ew_street", "corner", "geometry"] + if c in g.columns] + return g[keep].copy() diff --git a/app/assets/nycha.py b/app/assets/nycha.py new file mode 100644 index 0000000000000000000000000000000000000000..188bbcc062a7162bcf5ad609bbf62e1fd01dd3d2 --- /dev/null +++ b/app/assets/nycha.py @@ -0,0 +1,28 @@ +"""NYCHA Developments (NYC OpenData phvi-damg). + +326 public-housing developments across NYC. Used as an asset class for +the bulk-mode register; the parent rationale for surfacing this layer +is that NYCHA was hit hard by Sandy and remains a published Tier-1 +flood-resilience priority in the city's Hazard Mitigation Plan. 
+""" +from __future__ import annotations + +import geopandas as gpd + +from app.spatial import DATA, load_layer + + +def load() -> gpd.GeoDataFrame: + g = load_layer(DATA / "nycha.geojson") + # NYCHA developments come back as polygons; the FSM expects point + # geometry for spatial joins. Use centroid. + g = g.copy() + g["geometry"] = g.geometry.centroid + + # NYCHA Developments has only `developmen` (truncated label), tds_num, borough. + g = g.rename(columns={"developmen": "name"}) + g["address"] = g["name"] # the field doubles as both + g["borough"] = g["borough"].str.title() # "BRONX" -> "Bronx" to match Riprap convention + + keep = [c for c in ["name", "address", "borough", "tds_num", "geometry"] if c in g.columns] + return g[keep].copy() diff --git a/app/assets/schools.py b/app/assets/schools.py new file mode 100644 index 0000000000000000000000000000000000000000..d0b6b0bd0db2fc94ee227134070dc01dc00cc1ca --- /dev/null +++ b/app/assets/schools.py @@ -0,0 +1,27 @@ +"""NYC DOE School Point Locations (Socrata a3nt-yts4).""" +from __future__ import annotations + +import geopandas as gpd + +from app.spatial import DATA, load_layer + +BORO = {"1": "Manhattan", "2": "Bronx", "3": "Brooklyn", "4": "Queens", "5": "Staten Island"} + + +def load() -> gpd.GeoDataFrame: + g = load_layer(DATA / "schools.geojson") + g = g.rename(columns={ + "loc_code": "loc_code", + "loc_name": "name", + "address": "address", + "bbl": "bbl", + "bin": "bin", + "boronum": "boro_num", + "geodistric": "geo_district", + "adimindist": "admin_district", + }) + g["borough"] = g["boro_num"].astype(str).map(BORO) + g["bbl"] = g["bbl"].astype(str).str.replace(r"\.0$", "", regex=True) + keep = ["loc_code", "name", "address", "borough", "bbl", "bin", + "geo_district", "admin_district", "geometry"] + return g[keep].copy() diff --git a/app/context/__init__.py b/app/context/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/app/context/_polygonize.py b/app/context/_polygonize.py new file mode 100644 index 0000000000000000000000000000000000000000..4911f21970d17a01814f7e2d2b27261334d8fb5e --- /dev/null +++ b/app/context/_polygonize.py @@ -0,0 +1,165 @@ +"""Vectorize a uint8 prediction raster (binary mask or class index) +into an EPSG:4326 GeoJSON FeatureCollection so the frontend can paint +it on the MapLibre map. + +The droplet's `/v1/prithvi-pluvial` and `/v1/terramind` routes return +their predictions as base64-encoded uint8 with a shape and (where +relevant) a class-label list. This module reconstructs the affine +transform from the chip's geographic bounds (which the HF Space +already knows) and walks `rasterio.features.shapes` to build polygons +in the chip's native CRS, then reprojects to WGS84 for the map. + +Best-effort: any failure returns an empty FeatureCollection rather +than raising into the caller's path. The map layer is decorative — +the briefing is the deliverable. +""" +from __future__ import annotations + +import base64 +import logging + +log = logging.getLogger("riprap.polygonize") + +EMPTY: dict = {"type": "FeatureCollection", "features": []} + + +def _decode_pred(pred_b64: str, pred_shape: list[int]): + """Inverse of the droplet's `base64(pred.tobytes())`. 
Returns a + uint8 numpy array of shape `pred_shape`, or None on decode error.""" + try: + import numpy as np + raw = base64.b64decode(pred_b64) + return np.frombuffer(raw, dtype="uint8").reshape(pred_shape) + except Exception: + log.exception("polygonize: pred decode failed") + return None + + +def polygonize_class_raster( + pred_b64: str, + pred_shape: list[int], + class_labels: list[str] | None, + bounds_4326: tuple[float, float, float, float], + *, + drop_classes: tuple[int, ...] = (0,), + simplify_tolerance: float = 0.0, +) -> dict: + """Vectorize a categorical prediction raster (one integer class per + pixel) into a FeatureCollection with one Feature per connected + polygon. `bounds_4326` is `(minlon, minlat, maxlon, maxlat)` of the + chip; the raster is assumed to span those bounds at uniform + pixel size. Each feature carries `class_idx` and `class_label` + so the frontend can color by class. + + `drop_classes`: skip pixels matching these class indices (default + drops 0 = "Background" / "outside" / etc). + """ + pred = _decode_pred(pred_b64, pred_shape) + if pred is None: + return EMPTY + try: + from rasterio.features import shapes + from rasterio.transform import from_bounds + from shapely.geometry import shape + h, w = pred.shape + minlon, minlat, maxlon, maxlat = bounds_4326 + # The chip is in EPSG:4326 for our use — Sentinel-2 chips are + # natively in their UTM zone, but we can polygonize against the + # WGS84 extent because the inference chip is a small bbox where + # the pixel-grid → lat/lon mapping is locally affine (sub-pixel + # error at NYC scale). + transform = from_bounds(minlon, minlat, maxlon, maxlat, w, h) + feats = [] + for geom, value in shapes(pred, mask=pred > 0, transform=transform): + v = int(value) + if v in drop_classes: + continue + label = (class_labels[v] + if class_labels and 0 <= v < len(class_labels) + else f"class_{v}") + poly = shape(geom) + if simplify_tolerance > 0: + poly = poly.simplify(simplify_tolerance, preserve_topology=True) + if poly.is_empty: + continue + feats.append({ + "type": "Feature", + "geometry": poly.__geo_interface__, + "properties": { + "class_idx": v, + "class_label": label, + "fill_color": _PALETTE.get(label.lower(), _DEFAULT_FILL), + }, + }) + return {"type": "FeatureCollection", "features": feats} + except Exception: + log.exception("polygonize: class raster vectorisation failed") + return EMPTY + + +def polygonize_binary_mask( + pred_b64: str, + pred_shape: list[int], + bounds_4326: tuple[float, float, float, float], + *, + label: str = "water", + fill_color: str = "#4A90E2", + simplify_tolerance: float = 0.0, +) -> dict: + """Vectorize a binary prediction raster (e.g. Prithvi water mask; + 1 = water, 0 = not). Returns one Feature per connected positive + region. 
Use this for prithvi_eo_live and the buildings LoRA.""" + pred = _decode_pred(pred_b64, pred_shape) + if pred is None: + return EMPTY + try: + from rasterio.features import shapes + from rasterio.transform import from_bounds + from shapely.geometry import shape + h, w = pred.shape + minlon, minlat, maxlon, maxlat = bounds_4326 + transform = from_bounds(minlon, minlat, maxlon, maxlat, w, h) + feats = [] + for geom, _value in shapes(pred, mask=pred > 0, transform=transform): + poly = shape(geom) + if simplify_tolerance > 0: + poly = poly.simplify(simplify_tolerance, preserve_topology=True) + if poly.is_empty: + continue + feats.append({ + "type": "Feature", + "geometry": poly.__geo_interface__, + "properties": { + "class_label": label, + "fill_color": fill_color, + }, + }) + return {"type": "FeatureCollection", "features": feats} + except Exception: + log.exception("polygonize: binary mask vectorisation failed") + return EMPTY + + +# Lightweight palette used by the LULC + buildings layers. Frontend +# may override via `fill_color` per feature; this is a sensible +# default keyed on lowercase class labels. +_DEFAULT_FILL = "#A0A0A0" +_PALETTE = { + # ESRI 2020 LULC schema (terramind v1 base generative) + "water": "#1F77B4", + "trees": "#2CA02C", + "grass": "#7FBF53", + "flooded vegetation": "#74C476", + "crops": "#E1C75A", + "scrub/shrub": "#A6BC44", + "built": "#D62728", + "bare ground": "#B07A4C", + "snow/ice": "#E0E7EC", + "clouds": "#CCCCCC", + # NYC LoRA LULC schema + "cropland": "#E1C75A", + "bare": "#B07A4C", + # Buildings LoRA + "building": "#D62728", + "background": _DEFAULT_FILL, +} diff --git a/app/context/dob_permits.py b/app/context/dob_permits.py new file mode 100644 index 0000000000000000000000000000000000000000..4d8438421fa5e3d865782fb9d785aa63a13f426c --- /dev/null +++ b/app/context/dob_permits.py @@ -0,0 +1,258 @@ +"""NYC DOB construction-permit specialist — "what are they building". + +Pulls active NYC DOB Permit Issuance records (Socrata `ipu4-2q9a`) +inside a polygon, filtered to recent New Building (NB), major +Alteration (A1), and Demolition (DM) jobs. Each project is then +cross-referenced against the static flood layers (Sandy 2012, DEP +Stormwater scenarios) so the reconciler can write things like: + + "12 active major construction projects in Gowanus. Of these, + 8 sit inside the DEP Extreme-2080 stormwater scenario." + +The dataset uses separate gis_latitude / gis_longitude columns rather +than a Socrata Point, so we bbox-filter via SoQL then do exact +point-in-polygon containment client-side with shapely. +""" +from __future__ import annotations + +import logging +from collections import Counter +from dataclasses import asdict, dataclass +from datetime import date, datetime, timedelta +from typing import Any + +import geopandas as gpd +import httpx +from shapely.geometry import Point + +log = logging.getLogger("riprap.dob_permits") + +URL = "https://data.cityofnewyork.us/resource/ipu4-2q9a.json" +DOC_ID = "dob_permits" +CITATION = ("NYC DOB Permit Issuance (NYC OpenData ipu4-2q9a) — " + "issued/in-progress construction permits") + +JOB_TYPE_LABELS = { + "NB": "new building", + "A1": "major alteration (use/occupancy)", + "A2": "minor alteration", + "A3": "minor work / interior", + "DM": "demolition", + "SG": "sign", + "PL": "plumbing", + "EQ": "equipment", +} + +# Default filter: focus on "what are they building" — new construction, +# major alterations, demolitions. Skip minor mechanical permits. 
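+# With these defaults the SoQL filter permits_in_bbox() assembles below
+# looks roughly like this (values illustrative, not a verbatim query):
+#
+#   job_type IN ('NB','A1','DM')
+#     AND issuance_date::floating_timestamp >= '2024-06-01'
+#     AND gis_latitude::number >= 40.67 AND gis_latitude::number <= 40.69
+#     AND gis_longitude::number >= -74.01 AND gis_longitude::number <= -73.98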
+DEFAULT_JOB_TYPES = ("NB", "A1", "DM") + + +@dataclass +class Permit: + job_id: str + job_type: str + job_type_label: str + permit_status: str + issuance_date: str + expiration_date: str | None + address: str + borough: str + bbl: str | None + lat: float + lon: float + owner_business: str | None + permittee_business: str | None + nta_name: str | None + + +def permits_in_bbox(min_lat: float, min_lon: float, + max_lat: float, max_lon: float, + job_types: tuple[str, ...] = DEFAULT_JOB_TYPES, + since: date | None = None, + limit: int = 5000) -> list[Permit]: + """Pull DOB permits intersecting a bounding box, recently issued, with + matching job types. We expand from polygon to bbox and rely on the + caller to do exact point-in-polygon filtering.""" + if since is None: + since = date.today() - timedelta(days=540) # ~18 months + # gis_latitude/gis_longitude are stored as text in this dataset; cast + # to number for the bbox compare. issuance_date is a floating timestamp + # surfaced as 'MM/DD/YYYY' string — cast explicitly to floating_timestamp + # so the comparator parses ISO dates correctly. BETWEEN is picky on text + # columns, so use explicit >= / <= operators. + where = ( + f"job_type IN ({','.join(repr(t) for t in job_types)})" + f" AND issuance_date::floating_timestamp >= '{since.isoformat()}'" + f" AND gis_latitude::number >= {min_lat}" + f" AND gis_latitude::number <= {max_lat}" + f" AND gis_longitude::number >= {min_lon}" + f" AND gis_longitude::number <= {max_lon}" + ) + r = httpx.get(URL, params={ + "$select": ",".join([ + "job__", "job_type", "permit_status", "issuance_date", + "expiration_date", "house__", "street_name", "borough", + "block", "lot", + "gis_latitude", "gis_longitude", "owner_s_business_name", + "permittee_s_business_name", "gis_nta_name", + ]), + "$where": where, + "$order": "issuance_date desc", + "$limit": str(limit), + }, timeout=60) + r.raise_for_status() + out: list[Permit] = [] + for row in r.json(): + try: + lat = float(row["gis_latitude"]) + lon = float(row["gis_longitude"]) + except (KeyError, ValueError, TypeError): + continue + addr = " ".join(filter(None, [ + row.get("house__"), + (row.get("street_name") or "").title(), + ])).strip() + # DOB has no `bbl` column; compose from borough + block + lot. + # Borough codes: MAN=1, BX=2, BK=3, QN=4, SI=5. + boro_code = {"MANHATTAN": "1", "BRONX": "2", "BROOKLYN": "3", + "QUEENS": "4", "STATEN ISLAND": "5"}.get( + (row.get("borough") or "").upper()) + block = (row.get("block") or "").lstrip("0") + lot = (row.get("lot") or "").lstrip("0") + bbl = (f"{boro_code}-{block.zfill(5)}-{lot.zfill(4)}" + if boro_code and block and lot else None) + out.append(Permit( + job_id=row.get("job__", ""), + job_type=row.get("job_type", ""), + job_type_label=JOB_TYPE_LABELS.get(row.get("job_type", ""), row.get("job_type", "")), + permit_status=row.get("permit_status", ""), + issuance_date=(row.get("issuance_date") or "")[:10], + expiration_date=(row.get("expiration_date") or "")[:10] or None, + address=addr, + borough=(row.get("borough") or "").title(), + bbl=bbl, + lat=lat, + lon=lon, + owner_business=row.get("owner_s_business_name"), + permittee_business=row.get("permittee_s_business_name"), + nta_name=row.get("gis_nta_name"), + )) + return out + + +def permits_in_polygon(polygon, polygon_crs: str = "EPSG:4326", + job_types: tuple[str, ...] = DEFAULT_JOB_TYPES, + since: date | None = None) -> list[Permit]: + """Permits inside a polygon. 
Uses bbox prefilter + shapely contains.""" + g = gpd.GeoDataFrame(geometry=[polygon], crs=polygon_crs).to_crs("EPSG:4326") + geom = g.iloc[0].geometry + minx, miny, maxx, maxy = geom.bounds + raw = permits_in_bbox(miny, minx, maxy, maxx, job_types=job_types, since=since) + out: list[Permit] = [] + for p in raw: + pt = Point(p.lon, p.lat) + if geom.contains(pt) or geom.intersects(pt): + out.append(p) + # Dedupe by job_id (one job can have multiple permits as work proceeds) + seen: dict[str, Permit] = {} + for p in out: + # Keep the most-recently-issued permit per job + cur = seen.get(p.job_id) + if cur is None or (p.issuance_date or "") > (cur.issuance_date or ""): + seen[p.job_id] = p + return list(seen.values()) + + +def cross_reference_flood(permits: list[Permit]) -> list[dict[str, Any]]: + """Tag each permit with which flood layers cover its point. + Adds: in_sandy (bool), dep_class (highest depth class hit across DEP scenarios), + dep_scenarios (list of scenario ids that fired).""" + if not permits: + return [] + from app.flood_layers import dep_stormwater, sandy_inundation + pts = gpd.GeoDataFrame( + geometry=[Point(p.lon, p.lat) for p in permits], + crs="EPSG:4326", + ).to_crs("EPSG:2263") + pts["_pid"] = list(range(len(pts))) + + sandy_flags = sandy_inundation.join(pts).reset_index(drop=True).tolist() + + dep_hits = {scen: dep_stormwater.join(pts, scen)["depth_class"].astype(int).tolist() + for scen in ("dep_extreme_2080", "dep_moderate_2050", "dep_moderate_current")} + + out = [] + for i, p in enumerate(permits): + scen_hits = {s: dep_hits[s][i] for s in dep_hits} + max_class = max(scen_hits.values(), default=0) + active_scens = [s for s, c in scen_hits.items() if c > 0] + out.append({ + **asdict(p), + "in_sandy": bool(sandy_flags[i]), + "dep_max_class": max_class, + "dep_scenarios": active_scens, + "any_flood_layer_hit": bool(sandy_flags[i] or max_class > 0), + }) + return out + + +def summary_for_polygon(polygon, polygon_crs: str = "EPSG:4326", + since_days: int = 540, + top_n: int = 8) -> dict: + """Full polygon-mode summary: list active permits, cross-reference each + with flood layers, return aggregate counts + a top-N projects-of-concern + list (those that hit at least one flood layer, ranked by max DEP class + + Sandy hit).""" + since = date.today() - timedelta(days=since_days) + permits = permits_in_polygon(polygon, polygon_crs=polygon_crs, since=since) + enriched = cross_reference_flood(permits) + + by_type: Counter = Counter(e["job_type_label"] for e in enriched) + by_status: Counter = Counter(e["permit_status"] for e in enriched) + n_total = len(enriched) + n_sandy = sum(1 for e in enriched if e["in_sandy"]) + n_dep_any = sum(1 for e in enriched if e["dep_max_class"] > 0) + n_dep_severe = sum(1 for e in enriched if e["dep_max_class"] >= 2) + n_any_flood = sum(1 for e in enriched if e["any_flood_layer_hit"]) + + # Rank: severity = (in_sandy * 3) + dep_max_class + def severity(e): + return (3 if e["in_sandy"] else 0) + e["dep_max_class"] + flagged = sorted( + [e for e in enriched if e["any_flood_layer_hit"]], + key=severity, reverse=True, + )[:top_n] + + # Light projection of every permit for map pinning (no need to ship the + # full permit record for the not-flagged ones — the map only needs lat, + # lon, address, job_type_label, and the flood-flag fields). 
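+    # One pin, for shape reference (values illustrative):
+    #   {"lat": 40.675, "lon": -73.998, "address": "123 Smith Street",
+    #    "job_type": "NB", "in_sandy": True, "dep_max_class": 2,
+    #    "any_flood": True}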
+    all_pins = [
+        {
+            "lat": e["lat"],
+            "lon": e["lon"],
+            "address": e["address"],
+            "job_type": e["job_type"],
+            "in_sandy": e["in_sandy"],
+            "dep_max_class": e["dep_max_class"],
+            "any_flood": e["any_flood_layer_hit"],
+        }
+        for e in enriched
+    ]
+    return {
+        "since": since.isoformat(),
+        "n_total": n_total,
+        "n_in_sandy": n_sandy,
+        "n_in_dep_any": n_dep_any,
+        "n_in_dep_severe": n_dep_severe,
+        "n_any_flood": n_any_flood,
+        "by_job_type": dict(by_type.most_common()),
+        "by_permit_status": dict(by_status.most_common()),
+        "flagged_top": flagged,
+        "all_pins": all_pins,
+        "all_count": n_total,
+    }
+
+
+def now_iso() -> str:
+    return datetime.utcnow().date().isoformat()
diff --git a/app/context/eo_chip_cache.py b/app/context/eo_chip_cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..93f6352f6e8305c7bf1dc3388768ec3d9a1662e9
--- /dev/null
+++ b/app/context/eo_chip_cache.py
@@ -0,0 +1,345 @@
+"""Per-query EO chip cache — Sentinel-2 L2A, Sentinel-1 RTC, DEM.
+
+Fetches a co-registered (S2L2A, S1RTC, DEM) chip centered on (lat, lon)
+and returns a dict of torch tensors ready for TerraMind-NYC inference.
+The TerraMind base was trained with `temporal_n_timestamps=4`, so this
+helper expands a single S2/S1 acquisition to T=4 by repetition along
+the temporal axis. Single-timestep nowcasting trades some training-
+distribution match for a much simpler runtime — the published LoRA
+adapters still produce sensible argmax masks at T=1 / tiled.
+
+Failure semantics mirror prithvi_live: every dependency or network
+failure is converted to a clean `{ok: False, skipped: <reason>}`
+result, never a raised exception. Callers (FSM specialists) that
+chain off the chip can short-circuit on `ok=False` and skip the
+specialist instead of surfacing a noisy error.
+"""
+from __future__ import annotations
+
+import concurrent.futures
+import logging
+import os
+import threading
+import time
+from typing import Any
+
+log = logging.getLogger("riprap.eo_chip_cache")
+
+ENABLE = os.environ.get("RIPRAP_EO_CHIP_ENABLE", "1").lower() in ("1", "true", "yes")
+SEARCH_DAYS = int(os.environ.get("RIPRAP_EO_CHIP_SEARCH_DAYS", "120"))
+MAX_CLOUD_PCT = float(os.environ.get("RIPRAP_EO_CHIP_MAX_CLOUD", "30"))
+CHIP_PX = int(os.environ.get("RIPRAP_EO_CHIP_PX", "224"))
+PIXEL_M = 10
+N_TIMESTEPS = 4
+
+# 12-band S2 L2A in TerraMind's expected order.
+S2_BANDS = ["B01", "B02", "B03", "B04", "B05", "B06", "B07",
+            "B08", "B8A", "B09", "B11", "B12"]
+
+# Sentinel-1 RTC on Planetary Computer publishes vv/vh polarisations.
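+# Stacking order matters downstream: the S1 chip is np.stack'd in this
+# list order, so the channel axis reads [vv, vh] → (2, CHIP_PX, CHIP_PX).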
+S1_BANDS = ["vv", "vh"] + + +def _has_required_deps() -> tuple[bool, str | None]: + missing: list[str] = [] + for name in ("planetary_computer", "pystac_client", + "rioxarray", "xarray", "torch", "numpy"): + try: + __import__(name) + except ImportError: + missing.append(name) + if missing: + return False, ", ".join(missing) + return True, None + + +_DEPS_OK, _DEPS_MISSING = _has_required_deps() +_FETCH_LOCK = threading.Lock() + + +def _search_s2(lat: float, lon: float): + """Return (item, cloud_cover) for the most recent low-cloud S2L2A + acquisition near (lat, lon), or (None, None) if no scene exists.""" + import datetime as dt + + import planetary_computer as pc + from pystac_client import Client + end = dt.datetime.utcnow().date() + start = end - dt.timedelta(days=SEARCH_DAYS) + client = Client.open( + "https://planetarycomputer.microsoft.com/api/stac/v1", + modifier=pc.sign_inplace, + ) + delta = 0.02 + search = client.search( + collections=["sentinel-2-l2a"], + bbox=[lon - delta, lat - delta, lon + delta, lat + delta], + datetime=f"{start}/{end}", + query={"eo:cloud_cover": {"lt": MAX_CLOUD_PCT}}, + max_items=20, + ) + items = sorted( + search.items(), + key=lambda it: (it.properties.get("eo:cloud_cover", 100), + -(it.datetime.timestamp() if it.datetime else 0)), + ) + if not items: + return None, None + item = items[0] + cc = float(item.properties.get("eo:cloud_cover", -1)) + return item, cc + + +def _search_s1(item_dt, lat: float, lon: float): + """Return the closest Sentinel-1 RTC acquisition to the given S2 + datetime, or None if Planetary Computer has nothing nearby.""" + import datetime as dt + + import planetary_computer as pc + from pystac_client import Client + win = dt.timedelta(days=10) + start = item_dt - win + end = item_dt + win + client = Client.open( + "https://planetarycomputer.microsoft.com/api/stac/v1", + modifier=pc.sign_inplace, + ) + delta = 0.02 + search = client.search( + collections=["sentinel-1-rtc"], + bbox=[lon - delta, lat - delta, lon + delta, lat + delta], + datetime=f"{start.isoformat()}/{end.isoformat()}", + max_items=10, + ) + items = list(search.items()) + if not items: + return None + items.sort(key=lambda it: + abs((it.datetime - item_dt).total_seconds()) + if it.datetime else 1e18) + return items[0] + + +def _read_band(href, bbox_xy_meters, epsg): + """Read a single COG band, clipped to the bbox, and resample to + CHIP_PX × CHIP_PX. Returns a numpy array (CHIP_PX, CHIP_PX) float32. + """ + import numpy as np + import rioxarray # noqa: F401 + da = rioxarray.open_rasterio(href, masked=False).squeeze(drop=True) + da = da.rio.clip_box(minx=bbox_xy_meters[0], miny=bbox_xy_meters[1], + maxx=bbox_xy_meters[2], maxy=bbox_xy_meters[3]) + if da.shape[-2] != CHIP_PX or da.shape[-1] != CHIP_PX: + # Resample (nearest is fine for the 10/20/60 m S2 mix; S1 is 10 m, + # DEM is 30 m and benefits from bilinear; we keep nearest for + # simplicity — the TerraMind LoRA was trained against terratorch's + # default resampler which is also nearest). + da = da.rio.reproject( + f"EPSG:{epsg}", shape=(CHIP_PX, CHIP_PX), resampling=0 + ) + arr = da.values.astype("float32") + return np.nan_to_num(arr) + + +def _fetch_modalities(lat: float, lon: float, timeout_s: float = 60.0) -> dict[str, Any]: + """Fetch S2L2A + S1RTC + DEM as numpy arrays, resampled to a common + CHIP_PX × CHIP_PX grid centered on (lat, lon). 
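+
+    Success payload (shapes for CHIP_PX=224): `s2` (12, 224, 224) float32,
+    `s1` (2, 224, 224) or None, `dem` (1, 224, 224) or None, plus
+    epsg / chip_px / pixel_m and the s2_meta / s1_meta scene records.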
+ """ + import numpy as np + from pyproj import Transformer + + t0 = time.time() + item, cc = _search_s2(lat, lon) + if item is None: + return {"ok": False, + "skipped": f"no <{MAX_CLOUD_PCT}% cloud S2 in last " + f"{SEARCH_DAYS}d"} + if "proj:epsg" in item.properties: + epsg = int(item.properties["proj:epsg"]) + else: + code = item.properties.get("proj:code", "") + if not code.startswith("EPSG:"): + return {"ok": False, + "skipped": "STAC item missing proj:epsg / proj:code"} + epsg = int(code.split(":", 1)[1]) + + fwd = Transformer.from_crs("EPSG:4326", f"EPSG:{epsg}", always_xy=True) + cx, cy = fwd.transform(lon, lat) + half_m = CHIP_PX / 2 * PIXEL_M + bbox = (cx - half_m, cy - half_m, cx + half_m, cy + half_m) + + if time.time() - t0 > timeout_s: + return {"ok": False, "skipped": "STAC search exceeded budget"} + + # ---- S2L2A: 12 bands ------------------------------------------------ + s2_arrs = [] + try: + for b in S2_BANDS: + href = item.assets[b].href + s2_arrs.append(_read_band(href, bbox, epsg)) + except Exception as e: + log.warning("eo_chip: S2 band fetch failed (%s); aborting", e) + return {"ok": False, "err": f"S2 fetch failed: {type(e).__name__}: {e}"} + s2 = np.stack(s2_arrs) # (12, H, W) + if s2.mean() > 1.0: + s2 = s2 / 10000.0 # scale L2A reflectance from int16 to ~[0, 1] + + # ---- S1RTC: 2 polarisations (best effort) --------------------------- + s1: np.ndarray | None = None + s1_meta: dict[str, Any] = {} + if time.time() - t0 < timeout_s: + try: + s1_item = _search_s1(item.datetime, lat, lon) + if s1_item is not None: + s1_arrs = [] + for b in S1_BANDS: + href = s1_item.assets[b].href + s1_arrs.append(_read_band(href, bbox, epsg)) + s1 = np.stack(s1_arrs) + s1_meta = { + "scene_id": s1_item.id, + "datetime": (s1_item.datetime.isoformat() + if s1_item.datetime else None), + } + except Exception as e: + log.warning("eo_chip: S1 fetch best-effort failed: %s", e) + + # ---- DEM: Copernicus 30 m via planetary_computer (best effort) ------ + dem: np.ndarray | None = None + if time.time() - t0 < timeout_s: + try: + import planetary_computer as pc + from pystac_client import Client + client = Client.open( + "https://planetarycomputer.microsoft.com/api/stac/v1", + modifier=pc.sign_inplace, + ) + dem_search = client.search( + collections=["cop-dem-glo-30"], + bbox=[lon - 0.02, lat - 0.02, lon + 0.02, lat + 0.02], + max_items=1, + ) + dem_items = list(dem_search.items()) + if dem_items: + href = dem_items[0].assets["data"].href + dem = _read_band(href, bbox, epsg) + dem = dem[None, :, :] # add channel dim + except Exception as e: + log.warning("eo_chip: DEM fetch best-effort failed: %s", e) + + return { + "ok": True, + "lat": lat, "lon": lon, + "epsg": epsg, "chip_px": CHIP_PX, "pixel_m": PIXEL_M, + "s2": s2, "s1": s1, "dem": dem, + "s2_meta": { + "scene_id": item.id, + "datetime": (item.datetime.isoformat() if item.datetime else None), + "cloud_cover": cc, + }, + "s1_meta": s1_meta, + "elapsed_s": round(time.time() - t0, 2), + } + + +def _to_terramind_tensors(modalities: dict[str, Any]) -> dict[str, Any]: + """Shape numpy modality arrays into the (B, C, T, H, W) tensors + TerraMind expects with `temporal_n_timestamps=4`. Single-timestep + fetches get tiled to T=4 — same observation in every slot. 
+ """ + import torch + s2 = modalities["s2"] # (12, H, W) + s2_t = torch.from_numpy(s2).float().unsqueeze(1) # (12, 1, H, W) + s2_t = s2_t.repeat(1, N_TIMESTEPS, 1, 1).unsqueeze(0) # (1, 12, T, H, W) + chips = {"S2L2A": s2_t} + if modalities.get("s1") is not None: + s1 = modalities["s1"] # (2, H, W) + s1_t = torch.from_numpy(s1).float().unsqueeze(1) + s1_t = s1_t.repeat(1, N_TIMESTEPS, 1, 1).unsqueeze(0) + chips["S1RTC"] = s1_t + if modalities.get("dem") is not None: + dem = modalities["dem"] # (1, H, W) + dem_t = torch.from_numpy(dem).float().unsqueeze(1) + dem_t = dem_t.repeat(1, N_TIMESTEPS, 1, 1).unsqueeze(0) + chips["DEM"] = dem_t + return chips + + +def _fetch_and_build(lat: float, lon: float, timeout_s: float) -> dict[str, Any]: + """Inner fetch + tensor build, run inside a bounded thread.""" + with _FETCH_LOCK: + try: + modalities = _fetch_modalities(lat, lon, timeout_s=timeout_s) + except Exception as e: + log.exception("eo_chip: fetch failed") + return {"ok": False, "err": f"{type(e).__name__}: {e}"} + if not modalities.get("ok"): + return modalities + try: + modalities["tensors"] = _to_terramind_tensors(modalities) + except Exception as e: + log.exception("eo_chip: tensor build failed") + return {"ok": False, + "err": f"tensor build failed: {type(e).__name__}: {e}"} + # Compute the chip's WGS84 bbox so downstream TerraMind specialists + # can polygonise their predictions onto the map. The chip is + # CHIP_PX × CHIP_PX at PIXEL_M (10 m) in the scene's UTM zone; + # reproject the four corners to EPSG:4326 and use the + # axis-aligned envelope. + try: + from pyproj import Transformer + half_m = (CHIP_PX * PIXEL_M) / 2.0 + t_to_utm = Transformer.from_crs( + "EPSG:4326", f"EPSG:{modalities['epsg']}", always_xy=True) + t_to_4326 = Transformer.from_crs( + f"EPSG:{modalities['epsg']}", "EPSG:4326", always_xy=True) + cx, cy = t_to_utm.transform(lon, lat) + corners_utm = [ + (cx - half_m, cy - half_m), + (cx - half_m, cy + half_m), + (cx + half_m, cy - half_m), + (cx + half_m, cy + half_m), + ] + corners_ll = [t_to_4326.transform(x, y) for x, y in corners_utm] + lons = [c[0] for c in corners_ll] + lats = [c[1] for c in corners_ll] + modalities["bounds_4326"] = ( + min(lons), min(lats), max(lons), max(lats)) + except Exception: + log.exception("eo_chip: bounds_4326 reprojection failed") + return modalities + + +def fetch(lat: float, lon: float, timeout_s: float = 60.0) -> dict[str, Any]: + """Run the chip pipeline. Always returns a dict with at minimum + `{ok, skipped|err, ...}`; on success the dict carries the + co-registered numpy arrays plus `tensors` (the TerraMind-shaped + torch dict). + + Runs in a daemon thread so that STAC searches and COG band downloads + (which use requests/rioxarray without per-call timeouts) are bounded + by a hard wall-clock deadline even when the network hangs. + """ + if not ENABLE: + return {"ok": False, "skipped": "RIPRAP_EO_CHIP_ENABLE=0"} + if not _DEPS_OK: + return {"ok": False, + "skipped": f"deps unavailable on this deployment: " + f"{_DEPS_MISSING}"} + # Hard wall-clock cap: pystac_client / rioxarray COG reads don't expose + # uniform per-request timeouts, so we bound the whole pipeline here. + hard_timeout = timeout_s + 15.0 + # Propagate the parent thread's emissions tracker into the worker so + # any inference._post calls made inside _fetch_and_build are recorded. 
+    from app import emissions as _emissions
+    _parent_tracker = _emissions.current()
+    # Deliberately NOT a `with` block: the context manager's __exit__ calls
+    # shutdown(wait=True), which would join the hung worker and defeat the
+    # hard timeout. Shut down without waiting instead and let the orphaned
+    # thread expire with its hung request.
+    pool = concurrent.futures.ThreadPoolExecutor(
+        max_workers=1,
+        initializer=lambda t=_parent_tracker: _emissions.install(t),
+    )
+    try:
+        future = pool.submit(_fetch_and_build, lat, lon, timeout_s)
+        return future.result(timeout=hard_timeout)
+    except concurrent.futures.TimeoutError:
+        log.warning("eo_chip: hard timeout after %.0fs (STAC/COG hung)", hard_timeout)
+        return {"ok": False, "skipped": f"eo_chip timed out after {hard_timeout:.0f}s"}
+    finally:
+        pool.shutdown(wait=False, cancel_futures=True)
diff --git a/app/context/floodnet.py b/app/context/floodnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c6c107265236dbe03063056d6bc95851d43f6a6
--- /dev/null
+++ b/app/context/floodnet.py
@@ -0,0 +1,148 @@
+"""FloodNet NYC — live ultrasonic flood sensor network.
+
+Hasura GraphQL endpoint, no auth, ~350 sensors. Used for:
+  - sensors_near(lat, lon, radius_m) → list of deployments
+  - flood_events_for(deployment_ids, since) → labeled flood events per sensor
+"""
+from __future__ import annotations
+
+from dataclasses import dataclass
+from datetime import datetime, timedelta, timezone
+from typing import Any
+
+import httpx
+
+URL = "https://api.floodnet.nyc/v1/graphql"
+DOC_ID = "floodnet"
+CITATION = "FloodNet NYC ultrasonic depth sensors (api.floodnet.nyc)"
+
+
+@dataclass
+class Sensor:
+    deployment_id: str
+    name: str
+    street: str
+    borough: str
+    status: str
+    deployed_at: str | None
+    lat: float | None = None
+    lon: float | None = None
+
+
+@dataclass
+class FloodEvent:
+    deployment_id: str
+    start_time: str
+    end_time: str | None
+    max_depth_mm: int | None
+    label: str | None
+
+
+def _gql(query: str, variables: dict[str, Any]) -> dict:
+    r = httpx.post(URL, json={"query": query, "variables": variables},
+                   timeout=20, verify=False)
+    r.raise_for_status()
+    j = r.json()
+    if "errors" in j:
+        raise RuntimeError(f"FloodNet GraphQL error: {j['errors']}")
+    return j["data"]
+
+
+_NEAR_Q = """
+query Near($lat: Float!, $lon: Float!, $r: Float!) {
+  deployments_within_radius(args:{lat:$lat, lon:$lon, radius_meters:$r},
+                            order_by:{date_deployed: asc}) {
+    deployment_id
+    name
+    sensor_address_street
+    sensor_address_borough
+    sensor_status
+    date_deployed
+    location
+  }
+}"""
+
+
+def _parse_location(loc) -> tuple[float | None, float | None]:
+    """Hasura PostGIS geometry returned as a GeoJSON object."""
+    if not loc or not isinstance(loc, dict):
+        return None, None
+    coords = loc.get("coordinates")
+    if not coords or len(coords) < 2:
+        return None, None
+    return coords[1], coords[0]  # (lat, lon) from (lon, lat)
+
+
+def sensors_near(lat: float, lon: float, radius_m: float = 1000) -> list[Sensor]:
+    d = _gql(_NEAR_Q, {"lat": lat, "lon": lon, "r": radius_m})
+    out = []
+    for row in d["deployments_within_radius"]:
+        slat, slon = _parse_location(row.get("location"))
+        out.append(Sensor(
+            deployment_id=row["deployment_id"],
+            name=row["name"] or "",
+            street=row.get("sensor_address_street") or "",
+            borough=row.get("sensor_address_borough") or "",
+            status=row.get("sensor_status") or "",
+            deployed_at=row.get("date_deployed"),
+            lat=slat,
+            lon=slon,
+        ))
+    return out
+
+
+_EVENTS_Q = """
+query Events($ids: [String!], $since: timestamp!)
{ + sensor_events(where:{ + deployment_id:{_in:$ids}, + start_time:{_gte:$since}, + label:{_eq:"flood"} + }, order_by:{start_time: desc}, limit: 200) { + deployment_id + start_time + end_time + max_depth_proc_mm + label + } +}""" + + +def flood_events_for(deployment_ids: list[str], + since: datetime | None = None) -> list[FloodEvent]: + if not deployment_ids: + return [] + if since is None: + since = datetime.now(timezone.utc) - timedelta(days=365 * 3) + d = _gql(_EVENTS_Q, { + "ids": deployment_ids, + "since": since.isoformat(timespec="seconds").replace("+00:00", ""), + }) + return [ + FloodEvent( + deployment_id=row["deployment_id"], + start_time=row["start_time"], + end_time=row.get("end_time"), + max_depth_mm=row.get("max_depth_proc_mm"), + label=row.get("label"), + ) + for row in d["sensor_events"] + ] + + +def summary_for_point(lat: float, lon: float, radius_m: float = 600) -> dict: + """One-shot summary used by the FSM node and the cited paragraph.""" + sensors = sensors_near(lat, lon, radius_m) + ids = [s.deployment_id for s in sensors] + events = flood_events_for(ids) + by_dep: dict[str, list[FloodEvent]] = {} + for e in events: + by_dep.setdefault(e.deployment_id, []).append(e) + peak = max((e for e in events if e.max_depth_mm is not None), + key=lambda e: e.max_depth_mm or 0, default=None) + return { + "n_sensors": len(sensors), + "sensors": [vars(s) for s in sensors], + "n_flood_events_3y": len(events), + "n_sensors_with_events": len(by_dep), + "peak_event": vars(peak) if peak else None, + } diff --git a/app/context/gliner_extract.py b/app/context/gliner_extract.py new file mode 100644 index 0000000000000000000000000000000000000000..9ffa9c1d1f54cef2a9edbcd282eaca5f57454d18 --- /dev/null +++ b/app/context/gliner_extract.py @@ -0,0 +1,147 @@ +"""GLiNER (urchade/gliner_medium-v2.1) typed-entity extraction over the +RAG retriever's top paragraphs. + +Adds structured fields to the reconciler's grounding context. For each +RAG chunk the specialist emits, GLiNER produces a list of typed spans +with one of five labels: + + nyc_location (e.g. "Coney Island") + dollar_amount (e.g. "$5.6 million") + date_range (e.g. "fiscal year 2025-2027") + agency (e.g. "NYC DEP") + infrastructure_project (e.g. "Bluebelt expansion") + +The doc_id for emission is `gliner_` where `` is the +RAG chunk's doc_id stripped of its `rag_` prefix. So `rag_comptroller` +becomes `gliner_comptroller`. The reconciler can then cite typed +fields with `[gliner_comptroller]`. + +License: Apache-2.0 — `urchade/gliner_medium-v2.1` (NOT the +`gliner_base` variant, which is CC-BY-NC-4.0). See +experiments/shared/licenses.md. +""" + +from __future__ import annotations + +import logging +import os +from dataclasses import dataclass + +log = logging.getLogger("riprap.gliner") + +ENTITY_LABELS = [ + "nyc_location", + "dollar_amount", + "date_range", + "agency", + "infrastructure_project", +] + +DEFAULT_THRESHOLD = float(os.environ.get("RIPRAP_GLINER_THRESHOLD", "0.45")) +MODEL_NAME = os.environ.get("RIPRAP_GLINER_MODEL", "urchade/gliner_medium-v2.1") +ENABLE = os.environ.get("RIPRAP_GLINER_ENABLE", "1").lower() in ("1", "true", "yes") + +_MODEL = None # lazy + + +@dataclass +class Extraction: + label: str + text: str + score: float + + +def _ensure_model(): + """Lazy GLiNER load. 
Returns None if disabled or load fails so
+    callers can silently fall back to no-op."""
+    global _MODEL
+    if not ENABLE:
+        return None
+    if _MODEL is not None:
+        # `or None` maps the failed-load sentinel (False) back to None on
+        # repeat calls so callers never treat a bool as a model.
+        return _MODEL or None
+    try:
+        from gliner import GLiNER
+        log.info("gliner: loading %s", MODEL_NAME)
+        _MODEL = GLiNER.from_pretrained(MODEL_NAME)
+    except Exception:
+        log.exception("gliner: load failed; specialist will no-op")
+        _MODEL = False  # sentinel
+    return _MODEL or None
+
+
+def warm():
+    _ensure_model()
+
+
+def _source_short(rag_doc_id: str) -> str:
+    """`rag_comptroller` -> `comptroller`. Anything not prefixed `rag_`
+    passes through unchanged."""
+    return rag_doc_id[4:] if rag_doc_id.startswith("rag_") else rag_doc_id
+
+
+def extract_for_chunk(text: str, threshold: float = DEFAULT_THRESHOLD) -> list[Extraction]:
+    if not text:
+        return []
+
+    # v0.4.5 — try the MI300X service first. The remote handles its
+    # own GLiNER load; this lets cpu-basic surfaces run typed
+    # extraction without baking gliner into the image.
+    try:
+        from app import inference as _inf
+        if _inf.remote_enabled():
+            remote = _inf.gliner_extract(text, ENTITY_LABELS)
+            if remote.get("ok"):
+                return [
+                    Extraction(label=e["label"], text=e["text"],
+                               score=float(e.get("score", 0)))
+                    for e in remote.get("entities", [])
+                    if e.get("score", 0) >= threshold
+                ]
+    except _inf.RemoteUnreachable as e:
+        log.info("gliner: remote unreachable (%s); local fallback", e)
+    except Exception:
+        log.exception("gliner: remote call failed; local fallback")
+
+    model = _ensure_model()
+    if model is None:
+        return []
+    raw = model.predict_entities(text, ENTITY_LABELS, threshold=threshold)
+    return [Extraction(label=r["label"], text=r["text"],
+                       score=float(r["score"])) for r in raw]
+
+
+def extract_for_rag_hits(hits: list[dict],
+                         threshold: float = DEFAULT_THRESHOLD,
+                         max_hits: int = 3) -> dict[str, dict]:
+    """Run GLiNER on the top-`max_hits` RAG hits. Returns a dict keyed by
+    short source id (e.g. "comptroller") with the structured payload
+    that the FSM stores into state["gliner"] and that
+    reconcile.build_documents() consumes."""
+    out: dict[str, dict] = {}
+    if not hits:
+        return out
+    for h in hits[:max_hits]:
+        source = _source_short(h.get("doc_id", "rag_unknown"))
+        ents = extract_for_chunk(h.get("text", ""), threshold=threshold)
+        if not ents:
+            continue
+        # Dedup verbatim repeats (common in agency PDFs that repeat
+        # "DEP" 13 times in a methodology section).
+        seen = set()
+        deduped: list[Extraction] = []
+        for e in ents:
+            key = (e.label, e.text.lower())
+            if key in seen:
+                continue
+            seen.add(key)
+            deduped.append(e)
+        out[source] = {
+            "rag_doc_id": h.get("doc_id"),
+            "title": h.get("title"),
+            "paragraph_excerpt": h.get("text", "")[:240]
+                + ("…" if len(h.get("text", "")) > 240 else ""),
+            "n_entities": len(deduped),
+            "entities": [{"label": e.label, "text": e.text,
+                          "score": round(e.score, 3)} for e in deduped],
+        }
+    return out
diff --git a/app/context/microtopo.py b/app/context/microtopo.py
new file mode 100644
index 0000000000000000000000000000000000000000..01dcdfa20c666f428298c02c96aebbe6b081779e
--- /dev/null
+++ b/app/context/microtopo.py
@@ -0,0 +1,274 @@
+"""LiDAR/DEM-derived micro-topography specialist.
+
+Reads a window from a precomputed NYC-wide DEM (data/nyc_dem_30m.tif)
+ +Metrics (all derived from the same small AOI raster): + + point_elev_m elevation at the address (m) + rel_elev_pct_750m percentile of point elev in a 750-m radius + rel_elev_pct_200m percentile of point elev in a 200-m radius + (block-scale "is this a bowl?") + basin_relief_m max-elev in 750-m AOI minus point elev + aoi_min_m, aoi_max_m for context + resolution_m + +We deliberately stop at "shape-of-the-terrain" metrics rather than full +hydrology — depression-fill / D8 flow accumulation on a flat coastal +DEM are noisy and slow. Percentile + relief is what the reconciler +actually needs to write a useful sentence. +""" +from __future__ import annotations + +import logging +import warnings +from dataclasses import dataclass +from pathlib import Path + +import numpy as np + +warnings.filterwarnings("ignore") + +log = logging.getLogger("riprap.microtopo") + +DOC_ID = "microtopo" +CITATION = "USGS 3DEP 30 m DEM (precomputed citywide GeoTIFF, WGS84)" + +DATA_DIR = Path(__file__).resolve().parent.parent.parent / "data" +DEM_PATH = DATA_DIR / "nyc_dem_30m.tif" +TWI_PATH = DATA_DIR / "twi.tif" +HAND_PATH = DATA_DIR / "hand.tif" + + +@dataclass +class Microtopo: + point_elev_m: float + rel_elev_pct_750m: float # 0..100 + rel_elev_pct_200m: float # 0..100 + basin_relief_m: float + aoi_min_m: float + aoi_max_m: float + aoi_radius_m: int + resolution_m: int + # Hydrology indices computed on the same DEM (whitebox-workflows) + twi: float | None = None # Topographic Wetness Index, ln(SCA / tan(slope)) + hand_m: float | None = None # Height Above Nearest Drainage (m) + + +def _percentile_in_window(arr: np.ndarray, iy: int, ix: int, point_val: float, + window_radius_cells: int) -> float: + H, W = arr.shape + y0 = max(0, iy - window_radius_cells) + y1 = min(H, iy + window_radius_cells + 1) + x0 = max(0, ix - window_radius_cells) + x1 = min(W, ix + window_radius_cells + 1) + sub = arr[y0:y1, x0:x1] + finite = sub[np.isfinite(sub)] + if finite.size == 0: + return float("nan") + return float((finite < point_val).sum()) / finite.size * 100.0 + + +_DEM_CACHE: dict = {} + + +def _read_full_raster(path: Path) -> tuple[np.ndarray | None, dict | None]: + import rasterio + if not path.exists(): + return None, None + with rasterio.open(path) as ds: + arr = ds.read(1).astype("float32") + nodata = ds.nodata + meta = {"H": ds.height, "W": ds.width, + "transform": ds.transform, "crs": ds.crs, "nodata": nodata} + if nodata is not None: + arr = np.where(arr == nodata, np.nan, arr) + return arr, meta + + +def _load_dem(): + """Read the precomputed NYC DEM + TWI + HAND rasters into memory. + + All three are aligned (same grid, same transform). We hold them as + numpy arrays so per-query slicing is safe under threading. 
+ """ + if "arr" in _DEM_CACHE: + return _DEM_CACHE + arr, meta = _read_full_raster(DEM_PATH) + if arr is None: + log.warning("microtopo DEM not found at %s — run scripts/fetch_nyc_dem.py", DEM_PATH) + return None + twi, _ = _read_full_raster(TWI_PATH) + hand, _ = _read_full_raster(HAND_PATH) + _DEM_CACHE.update({ + "arr": arr, "H": meta["H"], "W": meta["W"], + "transform": meta["transform"], "crs": meta["crs"], + "twi": twi, "hand": hand, + }) + note = [] + if twi is not None: note.append(f"TWI {TWI_PATH.name}") + if hand is not None: note.append(f"HAND {HAND_PATH.name}") + log.info("microtopo: loaded NYC DEM %s (%dx%d, %s); aux: %s", + DEM_PATH.name, meta["H"], meta["W"], meta["crs"], + ", ".join(note) if note else "(none — algorithmic only)") + return _DEM_CACHE + + +def warm(): + _load_dem() + + +def _row_col(transform, lat: float, lon: float) -> tuple[int, int]: + """Inverse-affine: WGS84 (lon,lat) -> raster (row, col). + Mirrors rasterio.transform.rowcol but without holding a dataset handle. + """ + # Diagonal affine (north-up raster): x = a*col + c, y = e*row + f. + a, c = transform.a, transform.c + e, f = transform.e, transform.f + col = int(round((lon - c) / a)) + row = int(round((lat - f) / e)) + return row, col + + +def microtopo_at(lat: float, lon: float, radius_m: int = 750) -> Microtopo | None: + state = _load_dem() + if state is None: + return None + arr_full = state["arr"] + transform = state["transform"] + + try: + row, col = _row_col(transform, lat, lon) + except Exception as e: + log.warning("microtopo index failed: %s", e) + return None + + res_m = abs(transform.a) * 111_000.0 * np.cos(np.radians(lat)) + cells_radius = max(2, int(np.ceil(radius_m / max(res_m, 1.0)))) + + H, W = state["H"], state["W"] + y0 = max(0, row - cells_radius); y1 = min(H, row + cells_radius + 1) + x0 = max(0, col - cells_radius); x1 = min(W, col + cells_radius + 1) + if y1 <= y0 or x1 <= x0: + return None + + arr = arr_full[y0:y1, x0:x1].copy() + + iy = row - y0 + ix = col - x0 + if not (0 <= iy < arr.shape[0] and 0 <= ix < arr.shape[1]): + return None + + point_elev = float(arr[iy, ix]) + if not np.isfinite(point_elev): + for r in range(1, 6): + ya, yb = max(0, iy - r), min(arr.shape[0], iy + r + 1) + xa, xb = max(0, ix - r), min(arr.shape[1], ix + r + 1) + sub = arr[ya:yb, xa:xb] + if np.isfinite(sub).any(): + point_elev = float(np.nanmean(sub)) + break + else: + return None + + finite = arr[np.isfinite(arr)] + if finite.size == 0: + return None + aoi_min = float(finite.min()) + aoi_max = float(finite.max()) + + pct_750 = float((finite < point_elev).sum()) / finite.size * 100.0 + cells_200m = max(1, int(round(200 / max(res_m, 1.0)))) + pct_200 = _percentile_in_window(arr, iy, ix, point_elev, cells_200m) + + twi_arr = state.get("twi") + hand_arr = state.get("hand") + twi_v: float | None = None + hand_v: float | None = None + if twi_arr is not None and 0 <= row < H and 0 <= col < W: + v = float(twi_arr[row, col]) + twi_v = round(v, 2) if np.isfinite(v) else None + if hand_arr is not None and 0 <= row < H and 0 <= col < W: + v = float(hand_arr[row, col]) + hand_v = round(v, 2) if np.isfinite(v) else None + + return Microtopo( + point_elev_m=round(point_elev, 2), + rel_elev_pct_750m=round(pct_750, 1), + rel_elev_pct_200m=round(pct_200, 1), + basin_relief_m=round(aoi_max - point_elev, 2), + aoi_min_m=round(aoi_min, 2), + aoi_max_m=round(aoi_max, 2), + aoi_radius_m=radius_m, + resolution_m=int(round(res_m)), + twi=twi_v, + hand_m=hand_v, + ) + + +def microtopo_for_polygon(polygon, polygon_crs: str = 
"EPSG:4326") -> dict | None: + """Polygon-mode aggregation: distributional summary of the DEM/HAND/TWI + rasters clipped to the polygon. Returns medians + fraction of cells + in flood-prone bands. Used for neighborhood-mode queries.""" + state = _load_dem() + if state is None: + return None + try: + import rasterio + from rasterio.mask import mask as rio_mask + except Exception: + return None + import geopandas as gpd + + poly = gpd.GeoDataFrame(geometry=[polygon], crs=polygon_crs).to_crs("EPSG:4326") + geom = [poly.iloc[0].geometry.__geo_interface__] + + def _stats(path: Path) -> dict | None: + if not path.exists(): + return None + try: + with rasterio.open(path) as src: + clipped, _ = rio_mask(src, geom, crop=True, filled=False) + arr = clipped[0] + vals = arr.compressed() if hasattr(arr, "compressed") else arr.flatten() + vals = vals[np.isfinite(vals)] + if vals.size == 0: + return None + return { + "n_cells": int(vals.size), + "min": float(np.min(vals)), + "median": float(np.median(vals)), + "p10": float(np.percentile(vals, 10)), + "p90": float(np.percentile(vals, 90)), + "max": float(np.max(vals)), + "raw": vals, + } + except Exception as e: + log.warning("polygon raster mask failed for %s: %r", path.name, e) + return None + + elev = _stats(DEM_PATH) + hand = _stats(HAND_PATH) + twi = _stats(TWI_PATH) + if elev is None: + return None + + # Fraction of polygon cells in canonical flood-prone bands + frac_hand_lt1 = ( + round(float((hand["raw"] < 1.0).mean()), 4) if hand else None + ) + frac_twi_gt10 = ( + round(float((twi["raw"] > 10.0).mean()), 4) if twi else None + ) + return { + "n_cells": elev["n_cells"], + "elev_min_m": round(elev["min"], 2), + "elev_median_m": round(elev["median"], 2), + "elev_p10_m": round(elev["p10"], 2), + "elev_max_m": round(elev["max"], 2), + "hand_median_m": round(hand["median"], 2) if hand else None, + "twi_median": round(twi["median"], 2) if twi else None, + "frac_hand_lt1": frac_hand_lt1, + "frac_twi_gt10": frac_twi_gt10, + } diff --git a/app/context/noaa_tides.py b/app/context/noaa_tides.py new file mode 100644 index 0000000000000000000000000000000000000000..05ce8b1aa3fe1e3bf8d244568dad0747361f842e --- /dev/null +++ b/app/context/noaa_tides.py @@ -0,0 +1,110 @@ +"""NOAA CO-OPS Tides & Currents — live coastal water level. + +api.tidesandcurrents.noaa.gov, no auth, 6-min cadence. + +We pick the nearest of three NYC-region stations to the queried address: + - 8518750 The Battery, NY + - 8516945 Kings Point, NY (Long Island Sound entrance) + - 8531680 Sandy Hook, NJ (NY Harbor approach) + +The verified-water-level API returns instantaneous water elevation +relative to MLLW (Mean Lower Low Water — the local tidal datum). To +distinguish "high tide" from "storm surge" we also fetch the published +predicted tide and report the residual. 
+""" +from __future__ import annotations + +from dataclasses import dataclass +from math import asin, cos, radians, sin, sqrt + +import httpx + +DOC_ID = "noaa_tides" +CITATION = "NOAA CO-OPS Tides & Currents (api.tidesandcurrents.noaa.gov)" +URL = "https://api.tidesandcurrents.noaa.gov/api/prod/datagetter" + +STATIONS = [ + # (id, name, lat, lon) + # NYC harbor + Long Island Sound + ("8518750", "The Battery, NY", 40.7006, -74.0142), + ("8516945", "Kings Point, NY", 40.8103, -73.7649), + ("8531680", "Sandy Hook, NJ", 40.4669, -74.0094), + # Hudson tidal corridor (head-of-tide is Troy / Albany; Hudson is tidal + # all the way up to the Federal Lock at Troy) + ("8518995", "Albany, NY (Hudson)", 42.6469, -73.7464), + ("8518962", "Turkey Point Hudson, NY", 41.7569, -73.9433), + ("8519483", "West Point, NY", 41.3845, -73.9536), +] + + +@dataclass +class TideReading: + station_id: str + station_name: str + distance_km: float + observed_ft: float | None # current water level above MLLW + predicted_ft: float | None # astronomical prediction at same instant + residual_ft: float | None # observed - predicted (≈ storm surge) + obs_time: str | None + error: str | None = None + + +def _haversine_km(lat1, lon1, lat2, lon2) -> float: + R = 6371.0 + p1, p2 = radians(lat1), radians(lat2) + dp = radians(lat2 - lat1); dl = radians(lon2 - lon1) + a = sin(dp/2)**2 + cos(p1)*cos(p2)*sin(dl/2)**2 + return 2 * R * asin(sqrt(a)) + + +def _nearest_station(lat: float, lon: float): + return min(STATIONS, key=lambda s: _haversine_km(lat, lon, s[2], s[3])) + + +def _fetch(station_id: str, product: str) -> dict: + r = httpx.get(URL, params={ + "date": "latest", "station": station_id, "product": product, + "datum": "MLLW", "units": "english", "time_zone": "lst_ldt", + "format": "json", + }, timeout=8.0) + r.raise_for_status() + return r.json() + + +def reading_at(lat: float, lon: float) -> TideReading: + sid, name, slat, slon = _nearest_station(lat, lon) + dist_km = round(_haversine_km(lat, lon, slat, slon), 1) + out = TideReading(station_id=sid, station_name=name, distance_km=dist_km, + observed_ft=None, predicted_ft=None, residual_ft=None, + obs_time=None) + try: + obs = _fetch(sid, "water_level").get("data") or [] + pred = _fetch(sid, "predictions").get("predictions") or [] + if obs: + out.observed_ft = round(float(obs[0]["v"]), 2) + out.obs_time = obs[0].get("t") + if pred: + out.predicted_ft = round(float(pred[0]["v"]), 2) + if out.observed_ft is not None and out.predicted_ft is not None: + out.residual_ft = round(out.observed_ft - out.predicted_ft, 2) + except Exception as e: + out.error = str(e) + return out + + +def summary_for_point(lat: float, lon: float) -> dict: + r = reading_at(lat, lon) + # Look up station coords for the map marker. + sta = next((s for s in STATIONS if s[0] == r.station_id), None) + return { + "station_id": r.station_id, + "station_name": r.station_name, + "station_lat": sta[2] if sta else None, + "station_lon": sta[3] if sta else None, + "distance_km": r.distance_km, + "observed_ft_mllw": r.observed_ft, + "predicted_ft_mllw": r.predicted_ft, + "residual_ft": r.residual_ft, + "obs_time": r.obs_time, + "error": r.error, + } diff --git a/app/context/npcc4_slr.py b/app/context/npcc4_slr.py new file mode 100644 index 0000000000000000000000000000000000000000..f061f830dbbd72980deffa43cff69d87603d8d24 --- /dev/null +++ b/app/context/npcc4_slr.py @@ -0,0 +1,42 @@ +"""NPCC4 sea-level rise projections for NYC (static lookup). 
+ +Source: New York City Panel on Climate Change 4th Assessment (2024), +Chapter 3, Table 3.2 — sea-level rise relative to 2000–2004 baseline, +Battery Tide Gauge (NOAA 8518750), primary NYC harbor reference. + +Values are in inches above the 2000–2004 mean. The NPCC4 uses a +probabilistic framework across RCP/SSP scenarios; the table excerpted +here represents the "likely range" (10th–90th) plus the high-end +"extreme" scenario (99th). +""" + +DOC_ID = "npcc4_slr" +CITATION = ( + "New York City Panel on Climate Change 4th Assessment (NPCC4 2024), " + "Chapter 3 — Sea Level Rise, Table 3.2. " + "Published by the New York Academy of Sciences. " + "Reference gauge: NOAA Battery (8518750), baseline 2000–2004." +) + +# Sea-level rise projections in INCHES above the 2000–2004 baseline, +# Battery Tide Gauge. Percentiles: 10th (low), 50th (mid), 90th (high), +# 99th (extreme). All values from NPCC4 (2024) Ch. 3 Table 3.2. +_TABLE_IN = { + 2050: {10: 8, 50: 15, 90: 29, 99: 40}, + 2100: {10: 13, 50: 31, 90: 65, 99: 96}, +} + + +def _in_to_m(inches: float) -> float: + return round(inches * 0.0254, 2) + + +def get_projections() -> dict: + """Return NPCC4 SLR projection dict, always available (static table).""" + result: dict = {"available": True, "baseline": "2000–2004", "gauge": "NOAA Battery (8518750)"} + for year, pcts in _TABLE_IN.items(): + result[str(year)] = { + str(pct): {"in": v, "m": _in_to_m(v)} + for pct, v in pcts.items() + } + return result diff --git a/app/context/nws_alerts.py b/app/context/nws_alerts.py new file mode 100644 index 0000000000000000000000000000000000000000..0d25a239131d89235f1d35a7f506d87535b568ca --- /dev/null +++ b/app/context/nws_alerts.py @@ -0,0 +1,71 @@ +"""NWS API — active alerts at a point. + +api.weather.gov/alerts/active?point={lat},{lon}, no auth, JSON. +A User-Agent header is required (NWS rate-limits anonymous traffic). + +We surface only flood-relevant categories so the doc the reconciler +sees is short and on-topic. 
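+
+The underlying request is a single GET (coordinates illustrative):
+
+    GET https://api.weather.gov/alerts/active?point=40.5749,-73.9850
+
+sent with the User-Agent below; alerts_at() then keeps only features
+whose `event` matches a flood keyword.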
+""" +from __future__ import annotations + +from typing import Any + +import httpx + +DOC_ID = "nws_alerts" +CITATION = "NWS public alert API (api.weather.gov/alerts)" + +USER_AGENT = "Riprap-NYC/0.1 (civic-flood-tool; +https://huggingface.co/spaces/msradam/riprap-nyc)" + +_FLOOD_EVENT_KEYWORDS = ( + "flood", "flash flood", "coastal flood", "high surf", "storm surge", + "hurricane", "tropical storm", "tornado warning", # high-impact context + "rip current", +) + + +def _is_flood_relevant(event_name: str) -> bool: + e = (event_name or "").lower() + return any(k in e for k in _FLOOD_EVENT_KEYWORDS) + + +def alerts_at(lat: float, lon: float) -> list[dict[str, Any]]: + r = httpx.get( + "https://api.weather.gov/alerts/active", + params={"point": f"{lat:.4f},{lon:.4f}"}, + headers={"User-Agent": USER_AGENT, "Accept": "application/geo+json"}, + timeout=8.0, + ) + r.raise_for_status() + out = [] + for f in r.json().get("features", []): + p = f.get("properties", {}) or {} + event = p.get("event") or "" + if not _is_flood_relevant(event): + continue + out.append({ + "id": p.get("id"), + "event": event, + "severity": p.get("severity"), + "urgency": p.get("urgency"), + "certainty": p.get("certainty"), + "headline": p.get("headline"), + "sent": p.get("sent"), + "effective": p.get("effective"), + "expires": p.get("expires"), + "sender_name": p.get("senderName"), + "areaDesc": p.get("areaDesc"), + }) + return out + + +def summary_for_point(lat: float, lon: float) -> dict: + try: + active = alerts_at(lat, lon) + except Exception as e: + return {"n_active": 0, "alerts": [], "error": str(e)} + return { + "n_active": len(active), + "alerts": active, + "error": None, + } diff --git a/app/context/nws_obs.py b/app/context/nws_obs.py new file mode 100644 index 0000000000000000000000000000000000000000..d9752349a9c4e5f5600758aa76bcac02e677a74a --- /dev/null +++ b/app/context/nws_obs.py @@ -0,0 +1,108 @@ +"""NWS station observations — latest hourly METAR for the nearest NYC airport. + +api.weather.gov/stations/{id}/observations/latest. + +Five NYC-region ASOS stations cover the city; we pick the nearest. +Most useful field for flood context is hourly precipitation (the +`precipitationLastHour` quantity, mm). The latest observation is +typically <60 min old. +""" +from __future__ import annotations + +from dataclasses import dataclass +from math import asin, cos, radians, sin, sqrt + +import httpx + +DOC_ID = "nws_obs" +CITATION = "NWS station observations API (api.weather.gov/stations)" + +USER_AGENT = "Riprap-NYC/0.1 (civic-flood-tool; +https://huggingface.co/spaces/msradam/riprap-nyc)" + +# NYC + Hudson Corridor ASOS stations. Picker is haversine-nearest, so adding +# upstate stations enables Albany / Poughkeepsie / Newburgh queries without +# breaking NYC behaviour (NYC stations stay closer for NYC lat/lon). 
+STATIONS = [
+    # NYC region
+    ("KNYC", "Central Park, NY", 40.7794, -73.9692),
+    ("KLGA", "LaGuardia Airport, NY", 40.7794, -73.8800),
+    ("KJFK", "JFK Airport, NY", 40.6413, -73.7781),
+    ("KEWR", "Newark Liberty, NJ", 40.6925, -74.1687),
+    ("KFRG", "Republic Farmingdale, NY", 40.7288, -73.4134),
+    # Hudson Corridor (south → north)
+    ("KHPN", "White Plains, NY", 41.0670, -73.7076),
+    ("KSWF", "Newburgh-Stewart, NY", 41.5042, -74.1048),
+    ("KPOU", "Poughkeepsie, NY", 41.6262, -73.8842),
+    ("KALB", "Albany Intl, NY", 42.7475, -73.8025),
+]
+
+
+@dataclass
+class Obs:
+    station_id: str
+    station_name: str
+    distance_km: float
+    obs_time: str | None
+    temp_c: float | None
+    precip_last_hour_mm: float | None
+    precip_last_3h_mm: float | None
+    precip_last_6h_mm: float | None
+    error: str | None = None
+
+
+def _haversine_km(lat1, lon1, lat2, lon2) -> float:
+    R = 6371.0
+    p1, p2 = radians(lat1), radians(lat2)
+    dp = radians(lat2 - lat1); dl = radians(lon2 - lon1)
+    a = sin(dp/2)**2 + cos(p1)*cos(p2)*sin(dl/2)**2
+    return 2 * R * asin(sqrt(a))
+
+
+def _val_mm(props, key) -> float | None:
+    """NWS wraps each quantity as {value: ..., unitCode: ...} — precip in
+    wmoUnit:mm, temperature in wmoUnit:degC — so no unit conversion is
+    needed: round the value, or return None if it is null."""
+    q = (props or {}).get(key) or {}
+    v = q.get("value")
+    if v is None:
+        return None
+    return round(float(v), 2)
+
+
+def obs_at(lat: float, lon: float) -> Obs:
+    sid, name, slat, slon = min(STATIONS, key=lambda s: _haversine_km(lat, lon, s[2], s[3]))
+    dist_km = round(_haversine_km(lat, lon, slat, slon), 1)
+    out = Obs(station_id=sid, station_name=name, distance_km=dist_km,
+              obs_time=None, temp_c=None,
+              precip_last_hour_mm=None, precip_last_3h_mm=None,
+              precip_last_6h_mm=None)
+    try:
+        r = httpx.get(
+            f"https://api.weather.gov/stations/{sid}/observations/latest",
+            headers={"User-Agent": USER_AGENT, "Accept": "application/geo+json"},
+            timeout=8.0,
+        )
+        r.raise_for_status()
+        p = r.json().get("properties", {}) or {}
+        out.obs_time = p.get("timestamp")
+        out.temp_c = _val_mm(p, "temperature")  # °C, despite the helper's name
+        out.precip_last_hour_mm = _val_mm(p, "precipitationLastHour")
+        out.precip_last_3h_mm = _val_mm(p, "precipitationLast3Hours")
+        out.precip_last_6h_mm = _val_mm(p, "precipitationLast6Hours")
+    except Exception as e:
+        out.error = str(e)
+    return out
+
+
+def summary_for_point(lat: float, lon: float) -> dict:
+    o = obs_at(lat, lon)
+    return {
+        "station_id": o.station_id,
+        "station_name": o.station_name,
+        "distance_km": o.distance_km,
+        "obs_time": o.obs_time,
+        "temp_c": o.temp_c,
+        "precip_last_hour_mm": o.precip_last_hour_mm,
+        "precip_last_3h_mm": o.precip_last_3h_mm,
+        "precip_last_6h_mm": o.precip_last_6h_mm,
+        "error": o.error,
+    }
diff --git a/app/context/nyc311.py b/app/context/nyc311.py
new file mode 100644
index 0000000000000000000000000000000000000000..7762776f66ec9a2eaa38db41a15f6630d6ce440f
--- /dev/null
+++ b/app/context/nyc311.py
@@ -0,0 +1,161 @@
+"""NYC 311 — flood-related complaints around a point.
+
+Live dataset: erm2-nwe9. Filter by descriptor (the flood signal is in
+descriptor, not complaint_type) within a buffer.
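+
+Point-mode SoQL, roughly (coordinates and window illustrative):
+
+    $where = (descriptor='Street Flooding (SJ)' OR ...)
+             AND within_circle(location, 40.6780, -73.9980, 200)
+             AND created_date >= '2020-01-01T00:00:00'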
+""" +from __future__ import annotations + +from collections import Counter +from dataclasses import dataclass +from datetime import datetime, timedelta, timezone + +import httpx + +URL = "https://data.cityofnewyork.us/resource/erm2-nwe9.json" +DOC_ID = "nyc311" +CITATION = "NYC 311 service requests (Socrata erm2-nwe9, 2010-present)" + +FLOOD_DESCRIPTORS = [ + "Street Flooding (SJ)", + "Sewer Backup (Use Comments) (SA)", + "Catch Basin Clogged/Flooding (Use Comments) (SC)", + "Highway Flooding (SH)", + "Manhole Overflow (Use Comments) (SA1)", + "Flooding on Street", + "RAIN GARDEN FLOODING (SRGFLD)", +] + +_DESC_CLAUSE = "(" + " OR ".join(f"descriptor='{d}'" for d in FLOOD_DESCRIPTORS) + ")" + + +@dataclass +class Complaint: + unique_key: str + descriptor: str + created_date: str + address: str | None + status: str | None + lat: float | None = None + lon: float | None = None + + +def complaints_near(lat: float, lon: float, radius_m: float = 200, + since: datetime | None = None, + limit: int = 1000) -> list[Complaint]: + where = f"{_DESC_CLAUSE} AND within_circle(location, {lat}, {lon}, {radius_m})" + if since: + # Socrata floating-timestamp: drop tz suffix + ts = since.replace(tzinfo=None).isoformat(timespec="seconds") + where += f" AND created_date >= '{ts}'" + r = httpx.get(URL, params={ + "$select": "unique_key, descriptor, created_date, incident_address, " + "status, latitude, longitude", + "$where": where, + "$order": "created_date desc", + "$limit": str(limit), + }, timeout=30) + r.raise_for_status() + out = [] + for row in r.json(): + lat = row.get("latitude") + lon = row.get("longitude") + try: + lat = float(lat) if lat is not None else None + lon = float(lon) if lon is not None else None + except Exception: + lat, lon = None, None + out.append(Complaint( + unique_key=row.get("unique_key", ""), + descriptor=row.get("descriptor", ""), + created_date=row.get("created_date", ""), + address=row.get("incident_address"), + status=row.get("status"), + lat=lat, lon=lon, + )) + return out + + +def summary_for_point(lat: float, lon: float, radius_m: float = 200, + years: int = 5) -> dict: + since = datetime.now(timezone.utc) - timedelta(days=365 * years) + cs = complaints_near(lat, lon, radius_m, since=since, limit=2000) + return _summarize(cs, years=years, radius_m=radius_m) + + +def complaints_in_polygon(polygon, polygon_crs: str = "EPSG:4326", + since: datetime | None = None, + limit: int = 5000, + simplify_tolerance: float = 0.0005) -> list[Complaint]: + """Pull flood-related complaints inside an arbitrary polygon via + Socrata's `within_polygon(location, 'MULTIPOLYGON(...)')` predicate. + + NYC NTA polygons can have thousands of vertices and exceed Socrata's + URL length limit (414). We simplify in EPSG:4326 with a default + ~50 m tolerance, which collapses vertex count ~10-20× without + materially changing the contained-points result. + + Polygon must be EPSG:4326 (lat/lon) for the Socrata query. 
+ """ + import geopandas as gpd + g = gpd.GeoDataFrame(geometry=[polygon], crs=polygon_crs).to_crs("EPSG:4326") + geom = g.iloc[0].geometry.simplify(simplify_tolerance, preserve_topology=True) + wkt = geom.wkt + where = f"{_DESC_CLAUSE} AND within_polygon(location, '{wkt}')" + if since: + ts = since.replace(tzinfo=None).isoformat(timespec="seconds") + where += f" AND created_date >= '{ts}'" + r = httpx.get(URL, params={ + "$select": "unique_key, descriptor, created_date, incident_address, status", + "$where": where, + "$order": "created_date desc", + "$limit": str(limit), + }, timeout=60) + r.raise_for_status() + return [ + Complaint( + unique_key=row.get("unique_key", ""), + descriptor=row.get("descriptor", ""), + created_date=row.get("created_date", ""), + address=row.get("incident_address"), + status=row.get("status"), + ) + for row in r.json() + ] + + +def summary_for_polygon(polygon, polygon_crs: str = "EPSG:4326", + years: int = 5) -> dict: + """Polygon-mode aggregation: counts of flood-related 311 complaints + inside the polygon over the trailing window.""" + since = datetime.now(timezone.utc) - timedelta(days=365 * years) + cs = complaints_in_polygon(polygon, polygon_crs=polygon_crs, since=since) + return _summarize(cs, years=years, radius_m=None) + + +def _summarize(cs: list[Complaint], years: int, radius_m: float | None) -> dict: + by_year: Counter = Counter(c.created_date[:4] for c in cs if c.created_date) + by_descriptor: Counter = Counter(c.descriptor for c in cs) + # Cap at 60 most-recent points for the map layer — keeps the SSE + # payload small while still showing meaningful clustering. + points = [ + {"lat": c.lat, "lon": c.lon, + "descriptor": c.descriptor, + "date": c.created_date[:10], + "address": c.address} + for c in cs[:60] + if c.lat is not None and c.lon is not None + ] + return { + "n": len(cs), + "radius_m": radius_m, + "years": years, + "by_year": dict(sorted(by_year.items())), + "by_descriptor": dict(by_descriptor.most_common(6)), + "most_recent": [ + {"date": c.created_date[:10], + "descriptor": c.descriptor, + "address": c.address} + for c in cs[:5] + ], + "points": points, + } diff --git a/app/context/terramind_nyc.py b/app/context/terramind_nyc.py new file mode 100644 index 0000000000000000000000000000000000000000..d4765622a016203999df7bb6472132cceba1d59b --- /dev/null +++ b/app/context/terramind_nyc.py @@ -0,0 +1,485 @@ +"""TerraMind-NYC adapters — LULC and Buildings inference for NYC chips. + +Wraps the Apache-2.0 [`msradam/TerraMind-NYC-Adapters`](https://huggingface.co/msradam/TerraMind-NYC-Adapters) +LoRA family fine-tuned on NYC EO chips (Sentinel-2 L2A + Sentinel-1 RTC ++ Copernicus DEM, temporal stack of 4) on AMD MI300X via AMD Developer +Cloud. Exposes two specialist entry points: + + lulc(s2l2a, s1rtc, dem) -> 5-class macro NYC LULC mask + buildings(s2l2a, s1rtc, dem) -> binary NYC building footprint mask + +The base TerraMind 1.0 weights are downloaded by terratorch on first +call; the LoRA adapter + UNet decoder weights come from the HF repo and +are cached to `~/.cache/huggingface/hub`. + +CHIP-SIZE TRAP. TerraMind's positional embeddings don't generalise off +its training resolution (224×224). Calling `task.model({...})` on a +chip ≠ 224×224 produces silent garbage. We therefore wrap inference +with `terratorch.tasks.tiled_inference.tiled_inference`, which slides +a 224×224 crop window across the chip and stitches per-window logits. 
+This matches the patch in +`experiments/18_terramind_nyc_lora/shared/inference_ensemble.py` that +the plan flags as required for production. + +Gated by RIPRAP_TERRAMIND_NYC_ENABLE — deployments without the deps +installed (HF Spaces' Py3.10 cone, plain Ollama dev VMs) silently no-op +through the same skipped-result shape every other heavy specialist +emits. + +This module does NOT fetch its own S2/S1/DEM chips. C4 wires it into +the FSM with a shared chip cache so the LULC and Buildings calls +don't each refetch ~150 MB of imagery. +""" +from __future__ import annotations + +import logging +import os +import threading +import time +from typing import Any + +log = logging.getLogger("riprap.terramind_nyc") + +ENABLE = os.environ.get("RIPRAP_TERRAMIND_NYC_ENABLE", "1").lower() in ("1", "true", "yes") +DEVICE = os.environ.get("RIPRAP_TERRAMIND_NYC_DEVICE", "cpu") +ADAPTERS_REPO = "msradam/TerraMind-NYC-Adapters" + +# Per-task config knobs the HF README's quick-start fixes for these +# adapters. Mirrored from experiments/18_terramind_nyc_lora/adapters/*/ +# config.yaml so a single source of truth lives next to the inference +# code rather than being scraped from YAML at runtime. +ADAPTER_SPECS: dict[str, dict[str, Any]] = { + "lulc": { + "subdir": "lulc_nyc", + "num_classes": 5, + "class_labels": [ + "Trees / vegetation", + "Cropland", + "Built / impervious", + "Bare ground", + "Water", + ], + }, + "buildings": { + "subdir": "buildings_nyc", + "num_classes": 2, + # The decoder emits class 0 = background, class 1 = building. + "class_labels": ["Background", "Building footprint"], + }, +} + +# Tile-window size — TerraMind's training resolution. Stride < window +# yields overlap (smooths seams from window-boundary classification +# noise); 96 px overlap matches the experiments/18 ensemble. +TILE_SIZE = 224 +TILE_STRIDE = 128 + +# One-shot lazy-init guards. The base TerraMind weights are heavy +# (~1.6 GB) and we want to load them once across LULC and Buildings. +_INIT_LOCK = threading.Lock() +_BASE_LOADED = False +_ADAPTERS: dict[str, Any] = {} # name -> built terratorch task on DEVICE + + +def _has_required_deps() -> tuple[bool, str | None]: + """Probe the heavy-EO deps. Same shape as prithvi_live's check — + a missing dep (terratorch / peft / safetensors / hf_hub) returns a + clean `skipped: deps_unavailable` outcome instead of a noisy + ModuleNotFoundError in the trace. + + On the HF Space, terratorch's import chain itself can raise + RuntimeError("operator torchvision::nms does not exist") when the + torchvision binary extension can't load against our CPU torch + wheel. Treat that as 'unavailable' too — the local inference path + is dead-on-arrival there.""" + missing: list[str] = [] + for name in ("terratorch", "peft", "safetensors", "huggingface_hub", + "torch", "yaml"): + try: + __import__(name) + except ImportError: + missing.append(name) + except Exception as e: + # torchvision::nms RuntimeError, libcuda load failure, etc. + log.warning("terramind_nyc: %s import raised %s; treating as " + "unavailable", name, type(e).__name__) + missing.append(f"{name} ({type(e).__name__})") + if missing: + return False, ", ".join(missing) + return True, None + + +_DEPS_OK, _DEPS_MISSING = _has_required_deps() + + +def _ensure_adapter(adapter_name: str): + """Build the terratorch SemanticSegmentationTask, inject the LoRA + scaffold, load the published Δ + decoder weights, return the task. 
+ + Per-task tasks share the TerraMind base inside terratorch's model + factory — calling SemanticSegmentationTask twice loads the base + twice in fp32 (~3.3 GB resident on CPU). For a two-task family this + is acceptable; we don't need the cross-task weight sharing the + experiments/18 ensemble does. If memory becomes a problem, swap + this for a single-task / hot-swap-adapter implementation. + """ + if adapter_name not in ADAPTER_SPECS: + raise KeyError(f"unknown adapter {adapter_name!r}; " + f"expected one of {list(ADAPTER_SPECS)}") + if adapter_name in _ADAPTERS: + return _ADAPTERS[adapter_name] + + with _INIT_LOCK: + if adapter_name in _ADAPTERS: + return _ADAPTERS[adapter_name] + + spec = ADAPTER_SPECS[adapter_name] + log.info("terramind_nyc: building task for %s", adapter_name) + + from huggingface_hub import snapshot_download + from peft import LoraConfig, inject_adapter_in_model + from safetensors.torch import load_file + from terratorch.tasks import SemanticSegmentationTask + + # 1. Pull the requested adapter subtree from the HF repo. + adapter_root = snapshot_download( + ADAPTERS_REPO, + allow_patterns=[f"{spec['subdir']}/*"], + ) + + # 2. Build the standard terratorch task with the same model_args + # the published HF_README quick-start uses. + task = SemanticSegmentationTask( + model_factory="EncoderDecoderFactory", + model_args=dict( + backbone="terramind_v1_base", + backbone_pretrained=True, + backbone_modalities=["S2L2A", "S1RTC", "DEM"], + backbone_use_temporal=True, + backbone_temporal_pooling="concat", + backbone_temporal_n_timestamps=4, + necks=[ + {"name": "SelectIndices", "indices": [2, 5, 8, 11]}, + {"name": "ReshapeTokensToImage", "remove_cls_token": False}, + {"name": "LearnedInterpolateToPyramidal"}, + ], + decoder="UNetDecoder", + decoder_channels=[512, 256, 128, 64], + head_dropout=0.1, + num_classes=spec["num_classes"], + ), + loss="ce", lr=1e-4, freeze_backbone=False, freeze_decoder=False, + ) + + # 3. Inject the LoRA scaffold the adapter weights were trained + # against. Same hyperparameters every adapter in this family + # used (see experiments/18 adapters/_template/config.yaml). + inject_adapter_in_model(LoraConfig( + r=16, lora_alpha=32, lora_dropout=0.05, + target_modules=["attn.qkv", "attn.proj"], bias="none", + ), task.model.encoder) + + # 4. Restore Δ matrices (encoder LoRA) and the decoder/neck/head + # weights from the safetensors bundle. The encoder.* prefix + # is stripped because the encoder state-dict is rooted at + # the encoder module, not the task. + adapter_dir = f"{adapter_root}/{spec['subdir']}" + lora_state = load_file(f"{adapter_dir}/adapter_model.safetensors") + head_state = load_file(f"{adapter_dir}/decoder_head.safetensors") + encoder_state = { + k.removeprefix("encoder."): v + for k, v in lora_state.items() if k.startswith("encoder.") + } + task.model.encoder.load_state_dict(encoder_state, strict=False) + for sub in ("decoder", "neck", "head", "aux_heads"): + sub_state = { + k[len(sub) + 1:]: v + for k, v in head_state.items() if k.startswith(sub + ".") + } + if sub_state and hasattr(task.model, sub): + getattr(task.model, sub).load_state_dict(sub_state, + strict=False) + + # 5. Move to the configured device. CUDA only if the caller + # asked AND a CUDA device is actually available — silently + # fall back to CPU otherwise. 
+ target_device = DEVICE + if target_device == "cuda": + import torch + if not torch.cuda.is_available(): + log.warning("terramind_nyc: CUDA unavailable, falling back to CPU") + target_device = "cpu" + task = task.to(target_device).eval() + + _ADAPTERS[adapter_name] = task + log.info("terramind_nyc: %s ready on %s", adapter_name, target_device) + return task + + +def _tiled_predict(task, modality_chips: dict, num_classes: int): + """Run the task's encoder-decoder forward in 224×224 tiles, returning + a (1, num_classes, H, W) logits tensor stitched from the windows. + + TerraMind's positional embeddings are tied to the 224×224 training + resolution. terratorch's tiled_inference helper slides a window + across the input modalities (it accepts a dict of per-modality + tensors as long as all modalities share H×W), runs the model on + each crop, and averages overlapping logits. Without it, larger + chips return silent garbage; smaller chips error on the encoder + ViT. + """ + import torch + from terratorch.tasks.tiled_inference import tiled_inference + + # tiled_inference invokes `model_forward(patch)` per tile. The task + # model returns a ModelOutput-like with .output OR a plain tensor; + # coerce to tensor either way. + def _forward(x, **_extra): + out = task.model(x) + return out.output if hasattr(out, "output") else out + + with torch.no_grad(): + logits = tiled_inference( + _forward, + modality_chips, + out_channels=num_classes, + h_crop=TILE_SIZE, + w_crop=TILE_SIZE, + h_stride=TILE_STRIDE, + w_stride=TILE_STRIDE, + average_patches=True, + blend_overlaps=True, + padding="reflect", + ) + return logits + + +def _summarize_lulc(pred, class_labels: list[str]) -> dict[str, Any]: + """Per-class pixel fraction + dominant class from an integer mask.""" + import numpy as np + pred_np = pred.detach().cpu().numpy() if hasattr(pred, "detach") else np.asarray(pred) + flat = pred_np.reshape(-1) + n = max(int(flat.size), 1) + fractions: dict[str, float] = {} + for idx, label in enumerate(class_labels): + pct = 100.0 * float((flat == idx).sum()) / n + if pct > 0: + fractions[label] = round(pct, 2) + dominant_idx = int(max(range(len(class_labels)), + key=lambda i: int((flat == i).sum()))) + return { + "ok": True, + "n_pixels": int(flat.size), + "shape": list(pred_np.shape), + "class_fractions": fractions, + "dominant_class": class_labels[dominant_idx], + "dominant_pct": fractions.get(class_labels[dominant_idx], 0.0), + } + + +def _summarize_buildings(pred, class_labels: list[str]) -> dict[str, Any]: + """Building-pixel coverage + simple connected-component count.""" + import numpy as np + pred_np = pred.detach().cpu().numpy() if hasattr(pred, "detach") else np.asarray(pred) + mask = (pred_np == 1).astype("uint8") + n_total = max(int(mask.size), 1) + pct_built = 100.0 * float(mask.sum()) / n_total + # Connected-component count is a cheap signal of "how many distinct + # buildings does this chip cover" — useful for the briefing without + # paying for full polygonisation. 
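+    # scipy.ndimage.label returns (labeled_array, num_features); its
+    # default 2-D structuring element is 4-connected, so buildings that
+    # touch only diagonally are counted as separate components.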
+    n_components: int | None = None
+    try:
+        from scipy.ndimage import label
+        _, n_components = label(mask)
+    except Exception:  # scipy is optional in some HF Spaces build cones
+        log.debug("terramind_nyc: scipy.ndimage unavailable; "
+                  "skipping component count")
+    return {
+        "ok": True,
+        "n_pixels": int(mask.size),
+        "shape": list(mask.shape),
+        "pct_buildings": round(pct_built, 2),
+        "n_building_components": n_components,
+        "class_labels": class_labels,
+    }
+
+
+def _try_remote(adapter_name: str, modality_chips: dict) -> dict | None:
+    """POST to the riprap-models inference service if configured.
+
+    Returns:
+      - successful result dict on a 200/ok=True remote response
+      - {"ok": False, "skipped": "<reason>"} when remote was attempted
+        but failed (RemoteUnreachable, ok=False, or other error). The
+        caller MUST NOT fall through to local terratorch in this case
+        — local has been broken on the CPU-tier UI Spaces since the
+        torchvision binary mismatch landed, and we'd rather show a
+        clean "remote unreachable" reason than a noisy crash.
+      - None ONLY when remote isn't configured at all (caller may
+        legitimately try local then)."""
+    try:
+        from app import inference as _inf
+        if not _inf.remote_enabled():
+            return None
+        s2 = modality_chips.get("S2L2A")
+        s1 = modality_chips.get("S1RTC")
+        dem = modality_chips.get("DEM")
+        # The router serializes torch tensors to base64 numpy float32 —
+        # the chip cache hands us [B, C, T, H, W]; keep that shape, the
+        # service rebuilds the temporal stack on its end.
+        result = _inf.terramind(adapter_name, s2, s1, dem)
+        if not result.get("ok"):
+            err = result.get("error") or result.get("err") or "unknown"
+            return {"ok": False,
+                    "skipped": f"remote terramind/{adapter_name} non-ok: {err}"}
+        result.setdefault("adapter", adapter_name)
+        result.setdefault("repo", ADAPTERS_REPO)
+        result["compute"] = f"remote · {result.get('device', 'gpu')}"
+        # Polygonize the prediction raster onto the chip's bounds so
+        # the map can paint the LULC / buildings overlay. Bounds come
+        # via the modality_chips dict — the eo_chip layer threads them
+        # through. Best-effort; never raises into the FSM.
+ bounds = modality_chips.get("bounds_4326") if modality_chips else None + pred_b64 = result.get("pred_b64") + pred_shape = result.get("pred_shape") + class_labels = result.get("class_labels") + if bounds and pred_b64 and pred_shape: + try: + from app.context._polygonize import ( + polygonize_binary_mask, polygonize_class_raster, + ) + if adapter_name == "buildings": + polys = polygonize_binary_mask( + pred_b64, pred_shape, tuple(bounds), + label="building", fill_color="#D62728", + simplify_tolerance=2e-5, + ) + else: + polys = polygonize_class_raster( + pred_b64, pred_shape, class_labels, tuple(bounds), + simplify_tolerance=2e-5, + ) + result["polygons_geojson"] = polys + except Exception: + log.exception("terramind/%s: polygonize failed", adapter_name) + result["polygons_geojson"] = None + return result + except _inf.RemoteUnreachable as e: + log.info("terramind/%s: remote unreachable (%s)", adapter_name, e) + return {"ok": False, + "skipped": f"remote terramind/{adapter_name} unreachable: {e}"} + except Exception as e: + log.exception("terramind/%s: remote call failed", adapter_name) + return {"ok": False, + "skipped": f"remote terramind/{adapter_name} error: " + f"{type(e).__name__}: {e}"} + + +def _run(adapter_name: str, modality_chips: dict, summarizer): + """Common boilerplate: gate, time, [remote attempt], load, tiled + predict, summarize.""" + if not ENABLE: + return {"ok": False, + "skipped": "RIPRAP_TERRAMIND_NYC_ENABLE=0"} + + # v0.4.5 — try remote first. The remote service has its own deps, + # so this path works even when local _DEPS_OK is False (the most + # common HF Spaces case until terratorch + peft are baked in). + remote = _try_remote(adapter_name, modality_chips or {}) + if remote is not None: + return remote + + if not _DEPS_OK: + return {"ok": False, + "skipped": f"deps unavailable on this deployment: " + f"{_DEPS_MISSING}"} + if not modality_chips: + return {"ok": False, "err": "no modality chips supplied"} + t0 = time.time() + try: + task = _ensure_adapter(adapter_name) + spec = ADAPTER_SPECS[adapter_name] + # Strip out bounds_4326 (auxiliary metadata, not a tensor) before + # handing the dict to terratorch's tiled_inference, which iterates + # all values as modalities. + tensors_only = {k: v for k, v in modality_chips.items() + if k != "bounds_4326"} + logits = _tiled_predict(task, tensors_only, spec["num_classes"]) + # logits: (B, C, H, W). Argmax to per-pixel class id. + pred = logits.argmax(dim=1).squeeze(0) + result = summarizer(pred, spec["class_labels"]) + result["elapsed_s"] = round(time.time() - t0, 2) + result["adapter"] = adapter_name + result["repo"] = ADAPTERS_REPO + result["compute"] = "local" + return result + except Exception as e: + msg = str(e) + # Translate torchvision binary-extension failures into a clean + # skip. terratorch + torchvision both ride a transitive + # dep cone on the HF Space (sentence-transformers pulls torch + # CPU; torchvision's C extension can't load against that wheel), + # so a local _ensure_adapter() raises RuntimeError with this + # signature when remote is also unreachable. Clean skip is the + # honest demo outcome — same as terramind_synthesis. 
+ if "torchvision::nms" in msg or "torchvision_C" in msg: + log.warning("terramind_nyc/%s: torchvision binary unavailable; " + "remote unreachable too; clean skip", adapter_name) + return {"ok": False, + "skipped": "remote inference unreachable + local " + "torchvision binary unavailable on this " + "deployment", + "elapsed_s": round(time.time() - t0, 2)} + log.exception("terramind_nyc.%s failed", adapter_name) + return {"ok": False, "err": f"{type(e).__name__}: {e}", + "elapsed_s": round(time.time() - t0, 2)} + + +def lulc(s2l2a, s1rtc=None, dem=None, + bounds_4326: tuple[float, float, float, float] | None = None, + ) -> dict[str, Any]: + """5-class NYC macro land-cover. + + Inputs are torch tensors. The temporal models we trained expect + [C, T, H, W] (preferred) or [C, H, W] (will be expanded to T=1). + Pass S1 and DEM if you have them — the published adapter was + trained on the full triplet and accuracy degrades when modalities + are dropped. + + `bounds_4326` is `(minlon, minlat, maxlon, maxlat)` of the chip + in WGS84; when provided, the LULC raster is polygonised onto the + chip's geographic extent so the map can render an overlay. + """ + chips = {"S2L2A": s2l2a} + if bounds_4326 is not None: + chips["bounds_4326"] = bounds_4326 + if s1rtc is not None: + chips["S1RTC"] = s1rtc + if dem is not None: + chips["DEM"] = dem + return _run("lulc", chips, _summarize_lulc) + + +def buildings(s2l2a, s1rtc=None, dem=None, + bounds_4326: tuple[float, float, float, float] | None = None, + ) -> dict[str, Any]: + """Binary NYC building-footprint mask. Same input contract as lulc().""" + chips = {"S2L2A": s2l2a} + if bounds_4326 is not None: + chips["bounds_4326"] = bounds_4326 + if s1rtc is not None: + chips["S1RTC"] = s1rtc + if dem is not None: + chips["DEM"] = dem + return _run("buildings", chips, _summarize_buildings) + + +def warm(): + """Optional pre-load — amortizes the first-query model build cost.""" + if not ENABLE or not _DEPS_OK: + return + try: + for name in ADAPTER_SPECS: + _ensure_adapter(name) + except Exception: + log.exception("terramind_nyc: warm() failed; specialists will no-op") diff --git a/app/context/terramind_synthesis.py b/app/context/terramind_synthesis.py new file mode 100644 index 0000000000000000000000000000000000000000..b3c1d311ff053f10b886b45c60597811df698974 --- /dev/null +++ b/app/context/terramind_synthesis.py @@ -0,0 +1,468 @@ +"""TerraMind v1 base as a real-time FSM node — DEM → ESRI LULC. + +Per user query: take the geocoded (lat, lon), pull a DEM patch from +Riprap's existing NYC-wide LiDAR raster (already used by the microtopo +specialist — no STAC dependency), run TerraMind to generate a +plausible categorical land-cover map from the terrain context, and +emit class fractions the reconciler can cite as a synthetic-prior +context layer alongside the empirical and modeled flood evidence. + +Why DEM → LULC (and not DEM → S2L2A as initially prototyped): + - LULC is *categorical* and *interpretable*. The output is one of + 10 ESRI Land Cover classes per pixel; class fractions like "78% + Built Area" go straight into the briefing as cite-able claims. + - S2L2A is 12-channel reflectance — uninterpretable downstream + without a separate segmentation head. + - LULC is *comparable to ground truth*: NYC PLUTO land-use class + is already in the data layer; future calibration possible. + +Class label mapping is *tentative* against ESRI 2020-2022 schema +(which TerraMesh's LULC tokenizer was trained on). 
The doc body +discloses the mapping as tentative and the reconciler is instructed +to use hedged framing ("the synthetic land-cover prior identifies … +likely class …") rather than asserting hard labels. + +Why this shape: + - **No STAC dependency.** Microsoft Planetary Computer search has + been intermittent during this hackathon; the DEM raster is local + and always available. + - **Real-time.** < 0.3 s synthesis + < 0.5 s DEM patch read on M3 + CPU once warm. + - **Honesty discipline.** Synthetic-prior tier, fourth epistemic + class alongside empirical / modeled / proxy. + +License: Apache-2.0 — `ibm-esa-geospatial/TerraMind-1.0-base`. +""" + +from __future__ import annotations + +import logging +import os +import random +import threading +import time +from typing import Any + +log = logging.getLogger("riprap.terramind") + +ENABLE = os.environ.get("RIPRAP_TERRAMIND_ENABLE", "1").lower() in ("1", "true", "yes") +DEFAULT_STEPS = int(os.environ.get("RIPRAP_TERRAMIND_STEPS", "10")) +DEFAULT_SEED = int(os.environ.get("RIPRAP_TERRAMIND_SEED", "42")) +CHIP_PX = int(os.environ.get("RIPRAP_TERRAMIND_CHIP_PX", "224")) +CHIP_M = CHIP_PX * 30 # NYC DEM is at 30 m -> 6.72 km square +HALF_M = CHIP_M / 2 + +_MODEL = None +_INIT_LOCK = threading.Lock() + +# Tentative ESRI 2020-2022 Land Cover class mapping for TerraMind v1's +# LULC tokenizer output (10 channels, argmax over channel axis -> class +# index 0-9). The README/docs don't expose the exact mapping and the +# tokenizer source confirms only "ESRI LULC" without a label table, so +# the names below are best-effort. The doc body discloses tentativeness. +LULC_CLASSES = [ + "water", # 0 + "trees", # 1 + "grass", # 2 + "flooded_vegetation", # 3 + "crops", # 4 + "scrub_shrub", # 5 + "built_area", # 6 + "bare_ground", # 7 + "snow_ice", # 8 + "clouds_or_no_data", # 9 +] + + +def _has_required_deps() -> tuple[bool, str | None]: + """Probe deps. terramind_synthesis runs only locally (no remote path + in app/inference.py for DEM-driven synthesis), so it always needs + terratorch. On the HF Space terratorch isn't installed, so this + specialist returns a clean `skipped: deps unavailable` outcome. + + Distinguishes a *truly missing* package (ModuleNotFoundError) from + a *transient race* (other ImportError — typically sklearn's + "partially initialized module" from concurrent imports).""" + missing = [] + for name in ("terratorch", "rasterio"): + try: + __import__(name) + except ModuleNotFoundError: + missing.append(name) + except ImportError: + log.debug("terramind: import race on %s, will retry on demand", name) + except Exception as e: + # torchvision::nms RuntimeError on HF Space — local inference + # is unavailable; treat as missing so fetch() returns a clean + # skip rather than crashing in _ensure_model. + log.warning("terramind: %s import raised %s; treating as " + "unavailable", name, type(e).__name__) + missing.append(f"{name} ({type(e).__name__})") + return (not missing, ", ".join(missing) if missing else None) + + +_DEPS_OK, _DEPS_MISSING = _has_required_deps() + + +def _ensure_model(): + """Lazy load with a lock so the parallel-block worker can't double-init.""" + global _MODEL + if _MODEL is not None: + return _MODEL + with _INIT_LOCK: + if _MODEL is not None: + return _MODEL + # Heavy import deferred to first call so module import stays cheap + # and HF Spaces (no terratorch) doesn't pay it at all. 
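+        # Side-effect import: loading the register module is what makes
+        # the terramind_* builder names resolvable through
+        # FULL_MODEL_REGISTRY.build() below (hence the bare import + noqa).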
+ import terratorch.models.backbones.terramind.model.terramind_register # noqa + from terratorch.registry import FULL_MODEL_REGISTRY + log.info("terramind: loading v1 base generate (DEM -> LULC)") + m = FULL_MODEL_REGISTRY.build( + "terratorch_terramind_v1_base_generate", + modalities=["DEM"], + output_modalities=["LULC"], + pretrained=True, + timesteps=DEFAULT_STEPS, + ) + m.eval() + _MODEL = m + log.info("terramind: model ready") + return _MODEL + + +def warm(): + """Call at app boot to amortize the ~6 s checkpoint load + first-call + JIT. No-op when deps are absent.""" + if ENABLE and _DEPS_OK: + try: + _ensure_model() + except Exception: + log.exception("terramind: warm() failed; specialist will no-op") + + +def _read_dem_patch(lat: float, lon: float): + """Read a CHIP_PX×CHIP_PX DEM patch centered on (lat, lon) from the + local NYC-wide LiDAR raster. Returns (array, bounds_4326) where + bounds_4326 is (minlon, minlat, maxlon, maxlat) so the synthesised + LULC can be georeferenced onto the same extent for map rendering. + Returns None if outside the raster's extent.""" + from pathlib import Path + + import numpy as np + import rasterio + from rasterio.windows import from_bounds + dem_path = (Path(__file__).resolve().parents[2] + / "data" / "nyc_dem_30m.tif") + if not dem_path.exists(): + return None + with rasterio.open(dem_path) as src: + # The DEM is in EPSG:4326 (geographic) in our cache — convert + # the chip extent in the same CRS by building a rough degree + # bbox from a meters-square half-side at NYC latitude. + # 1 degree lat ≈ 111 km, 1 degree lon ≈ 85 km at 40.7°N. + d_lat = (HALF_M / 111_000.0) + d_lon = (HALF_M / 85_000.0) + win = from_bounds(lon - d_lon, lat - d_lat, + lon + d_lon, lat + d_lat, + src.transform) + arr = src.read(1, window=win, boundless=True, fill_value=0).astype("float32") + if arr.size == 0 or arr.shape[0] < 8 or arr.shape[1] < 8: + return None + # Resize to CHIP_PX × CHIP_PX via torch interpolation. The exact + # pixel-perfect alignment doesn't matter for a synthetic prior; the + # model just needs a real terrain patch to condition on. + import torch + t = torch.from_numpy(arr).unsqueeze(0).unsqueeze(0) + t = torch.nn.functional.interpolate(t, size=(CHIP_PX, CHIP_PX), + mode="bilinear", align_corners=False) + out = t.squeeze(0).numpy() # (1, CHIP_PX, CHIP_PX) + # Replace NaN sentinel values with median elevation so the model + # doesn't see NaN tokens. + if np.isnan(out).any(): + med = float(np.nanmedian(out)) + out = np.nan_to_num(out, nan=med) + bounds_4326 = (lon - d_lon, lat - d_lat, lon + d_lon, lat + d_lat) + return out, bounds_4326 + + +# Map class index -> visual color for the categorical fill on the +# MapLibre layer. Colors picked to be visually distinct from the +# existing red (Sandy) / blue (DEP) / cyan (Prithvi) / orange (Ida HWM). +LULC_FILL_COLORS = { + "water": "#0284c7", # not used (we keep water clear so + # the underlying basemap shows) + "trees": "#16a34a", # green + "grass": "#86efac", # pale green + "flooded_vegetation": "#a3e635", # lime + "crops": "#fde047", # yellow + "scrub_shrub": "#bef264", + "built_area": "#9ca3af", # neutral gray + "bare_ground": "#d6d3d1", # warm light gray + "snow_ice": "#f3f4f6", + "clouds_or_no_data": "#000000", # not used (kept transparent) +} +# Classes we don't render at all (transparent) — water is best left +# uncolored so the basemap shoreline reads through; clouds/no-data is +# semantically meaningless to fill. 
+LULC_HIDE_CLASSES = {"water", "clouds_or_no_data"} + + +def _polygonize_lulc(class_idx, bounds_4326: tuple) -> dict: + """Vectorize the per-pixel argmax classification into one MultiPolygon + per class label, then dump as a single GeoJSON FeatureCollection in + EPSG:4326. Each feature carries `label` + `class_idx` properties so + the frontend can colour by category. + """ + import json + + import geopandas as gpd + from rasterio.features import shapes + from rasterio.transform import from_bounds as transform_from_bounds + from shapely.geometry import shape + + minlon, minlat, maxlon, maxlat = bounds_4326 + h, w = class_idx.shape + transform = transform_from_bounds(minlon, minlat, maxlon, maxlat, w, h) + feats = [] + for i, label in enumerate(LULC_CLASSES): + if label in LULC_HIDE_CLASSES: + continue + mask = (class_idx == i).astype("uint8") + if mask.sum() < 8: # skip tiny noise + continue + polys = [] + for geom, value in shapes(mask, mask=mask.astype(bool), + transform=transform): + if value != 1: + continue + polys.append(shape(geom)) + if not polys: + continue + # Dissolve via geopandas + simplify lightly. The chip is 30 m + # per pixel and we don't need pixel-edge fidelity at urban zoom. + gdf = gpd.GeoDataFrame({"geometry": polys}, crs="EPSG:4326") + gdf["geometry"] = gdf.geometry.simplify(1e-4, preserve_topology=True) + for geom in gdf.geometry: + feats.append({ + "type": "Feature", + "geometry": json.loads(gpd.GeoSeries([geom], + crs="EPSG:4326").to_json())["features"][0]["geometry"], + "properties": {"label": label, "class_idx": i, + "fill_color": LULC_FILL_COLORS.get(label, "#9ca3af")}, + }) + return {"type": "FeatureCollection", "features": feats} + + +def fetch(lat: float, lon: float, timeout_s: float = 60.0) -> dict[str, Any]: + """Run the specialist. Returns: + { ok: bool, + skipped: str | None, + synthetic_modality: bool, + tim_chain: list[str], + diffusion_steps: int, diffusion_seed: int, + dem_mean_m: float, + class_fractions: dict[str, float], # tentative ESRI labels + dominant_class: str, # highest-fraction label + dominant_pct: float, + n_classes_observed: int, + chip_shape: list[int], + elapsed_s: float, + err: str | None } + + Designed never to raise. Failures show up as ok=False with reason. + """ + if not ENABLE: + return {"ok": False, "skipped": "RIPRAP_TERRAMIND_ENABLE=0"} + t0 = time.time() + try: + import numpy as np + patch = _read_dem_patch(lat, lon) + if patch is None: + return {"ok": False, "skipped": "no DEM coverage at this point"} + dem, bounds_4326 = patch + dem_mean = float(dem.mean()) + + # v0.4.5+ — try the MI300X inference service first if configured. + # The droplet's /v1/terramind dispatch handles adapter='synthesis' + # via _terramind_synthesis_inference (DEM -> generative LULC). On + # the HF Space terratorch's torchvision binary doesn't load, so + # this is the only working path there. + try: + from app import inference as _inf + if _inf.remote_enabled(): + # The terramind v1 base generative encoder embedding + # layer unpacks `B, C, H, W = x.shape` (verified against + # terratorch_terramind_v1_base_generate). DEM has C=1, so + # the on-the-wire shape is (1, 1, H, W) 4-D. + # `_read_dem_patch` returns a 3-D (1, H, W) array (it + # interpolates to CHIP_PX×CHIP_PX through a 4-D + # torch.functional.interpolate then squeezes the batch), + # so we add only the batch dim — not two. 
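+                # e.g. (H, W)    -> (1, 1, H, W)
+                #      (1, H, W) -> (1, 1, H, W)  (batch dim only)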
+ import numpy as _np_local + dem_arr = _np_local.asarray(dem, dtype="float32") + if dem_arr.ndim == 2: # (H, W) + dem_remote = dem_arr[None, None, :, :] + elif dem_arr.ndim == 3: # (1, H, W) + dem_remote = dem_arr[None, :, :, :] + elif dem_arr.ndim == 4: # already (1, 1, H, W) + dem_remote = dem_arr + else: + raise ValueError( + f"unexpected DEM shape {dem_arr.shape}; " + "expected 2/3/4-D") + remote = _inf.terramind("synthesis", None, None, dem_remote, + timeout=timeout_s) + if remote.get("ok"): + elapsed = round(time.time() - t0, 2) + # Polygonize the prediction raster for the map + # layer. The droplet returns the per-pixel argmax; + # we vectorize against the chip's bounds. + polys = None + pred_b64 = remote.get("pred_b64") + pred_shape = remote.get("pred_shape") + class_labels = (remote.get("class_labels") + or LULC_CLASSES) + if pred_b64 and pred_shape: + try: + from app.context._polygonize import ( + polygonize_class_raster, + ) + polys = polygonize_class_raster( + pred_b64, pred_shape, class_labels, + tuple(bounds_4326), + simplify_tolerance=2e-5, + ) + except Exception: + log.exception("terramind/synthesis: " + "polygonize failed") + polys = None + out = { + "ok": True, + "synthetic_modality": True, + "tim_chain": ["DEM", "LULC_synthetic"], + "diffusion_steps": remote.get("diffusion_steps", + DEFAULT_STEPS), + "diffusion_seed": DEFAULT_SEED, + "dem_mean_m": round(dem_mean, 2), + "class_fractions": remote.get("class_fractions") or {}, + "dominant_class": remote.get("dominant_class") or "unknown", + "dominant_pct": remote.get("dominant_pct") or 0.0, + "n_classes_observed": remote.get("n_classes_observed") or 0, + "chip_shape": remote.get("shape") or [], + "bounds_4326": list(bounds_4326), + "polygons_geojson": polys, + "label_schema": remote.get("label_schema") or "", + "compute": f"remote · {remote.get('device', 'gpu')}", + "elapsed_s": elapsed, + } + return out + # remote returned non-ok — surface that signal directly + return {"ok": False, + "skipped": f"remote terramind synthesis non-ok: " + f"{remote.get('error') or remote.get('detail') or 'unknown'}", + "elapsed_s": round(time.time() - t0, 2)} + except _inf.RemoteUnreachable as e: + log.info("terramind_synthesis: remote unreachable (%s); local fallback", e) + except Exception as e: + log.exception("terramind_synthesis: remote call failed") + return {"ok": False, + "skipped": f"remote terramind synthesis error: " + f"{type(e).__name__}: {e}", + "elapsed_s": round(time.time() - t0, 2)} + + # Local fallback — original path; only available where terratorch + # imports without the torchvision::nms RuntimeError. + if not _DEPS_OK: + return {"ok": False, "skipped": f"deps unavailable: {_DEPS_MISSING}"} + import torch + random.seed(DEFAULT_SEED) + torch.manual_seed(DEFAULT_SEED) + + model = _ensure_model() + # `dem` is 2-D (H, W) from `_read_dem_patch.src.read(1, ...)`. The + # terramind v1 base generative encoder wants (B=1, C=1, H, W) 4-D. + dem_t = torch.from_numpy(dem).unsqueeze(0).unsqueeze(0).float() + if time.time() - t0 > timeout_s: + return {"ok": False, "skipped": "terramind exceeded budget"} + + with torch.no_grad(): + out = model({"DEM": dem_t}, timesteps=DEFAULT_STEPS, + verbose=False) + lulc = out["LULC"] + if hasattr(lulc, "detach"): + lulc = lulc.detach().cpu().numpy() + if lulc.ndim == 4: + lulc = lulc[0] # (n_classes, H, W) + # Argmax over class channel -> per-pixel class index, then + # fraction by class. This is the cite-able structured output. 
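+        # e.g. (n_classes, H, W) float logits -> (H, W) integer class ids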
+ class_idx = lulc.argmax(axis=0) # (H, W) + unique, counts = np.unique(class_idx, return_counts=True) + total = float(class_idx.size) + fractions: dict[str, float] = {} + for u, c in zip(unique, counts, strict=False): + label = (LULC_CLASSES[int(u)] if 0 <= int(u) < len(LULC_CLASSES) + else f"class_{int(u)}") + fractions[label] = round(100.0 * c / total, 2) + # Sort dominant -> tail for deterministic doc body ordering. + ordered = dict(sorted(fractions.items(), + key=lambda kv: kv[1], reverse=True)) + dominant_class = next(iter(ordered)) if ordered else "unknown" + dominant_pct = ordered.get(dominant_class, 0.0) + # Class indices map to TerraMesh's LULC tokenizer codebook; the + # exact label-to-index mapping isn't published. Surface a tentative + # name plus the raw index so a reader can see we're not asserting + # ground truth. + dominant_idx = next((i for i, lbl in enumerate(LULC_CLASSES) + if lbl == dominant_class), -1) + dominant_display = ( + f"class_{dominant_idx} (tentative: {dominant_class})" + if dominant_idx >= 0 else dominant_class + ) + + # Polygonize the categorical raster for the map layer. + # Best-effort — failure here doesn't fail the specialist. + try: + polygons_geojson = _polygonize_lulc(class_idx, bounds_4326) + except Exception: + log.exception("terramind: polygonize failed; skipping map layer") + polygons_geojson = None + + return { + "ok": True, + "synthetic_modality": True, + "tim_chain": ["DEM", "LULC_synthetic"], + "diffusion_steps": DEFAULT_STEPS, + "diffusion_seed": DEFAULT_SEED, + "dem_mean_m": round(dem_mean, 2), + "class_fractions": ordered, + "dominant_class": dominant_class, + "dominant_class_display": dominant_display, + "dominant_pct": dominant_pct, + "n_classes_observed": len(ordered), + "chip_shape": list(lulc.shape), + "bounds_4326": list(bounds_4326), + "polygons_geojson": polygons_geojson, + "label_schema": "ESRI 2020-2022 Land Cover (tentative — " + "TerraMind tokenizer source confirms ESRI but " + "not exact label-to-index mapping)", + "elapsed_s": round(time.time() - t0, 2), + } + except Exception as e: + msg = str(e) + # Translate the torchvision binary-extension failure into a clean + # skip. The HF Space ships torchvision via a transitive sentence- + # transformers dep, but its C extension can't load alongside our + # CPU torch wheel, so terratorch's NMS call raises RuntimeError. + # Surface this honestly — the local inference path is unavailable + # on this deployment, same outcome as a missing terratorch. + if "torchvision::nms" in msg or "torchvision_C" in msg: + log.warning("terramind: torchvision binary unavailable on this " + "deployment; skipping local inference") + return {"ok": False, + "skipped": "local inference unavailable on this " + "deployment (torchvision binary extension " + "not loadable); no remote synthesis path", + "elapsed_s": round(time.time() - t0, 2)} + log.exception("terramind: fetch failed") + return {"ok": False, "err": f"{type(e).__name__}: {e}", + "elapsed_s": round(time.time() - t0, 2)} diff --git a/app/emissions.py b/app/emissions.py new file mode 100644 index 0000000000000000000000000000000000000000..11d5983da6d582d08944b145758dc28f0824f85f --- /dev/null +++ b/app/emissions.py @@ -0,0 +1,269 @@ +"""Per-query emissions tracker for inference calls. 
+ +Records every LLM and ML-inference call made during a single query and +summarizes: + - wallclock duration per call + - prompt + completion tokens (LLM) + - energy in watt-hours, **measured from the L4 GPU when available** + (the inference proxy reports per-call `X-GPU-Power-W` / + `X-GPU-Energy-J` headers from a 100 ms-cadence NVML sampler). + Falls back to a duration × data-sheet-power estimate when the + proxy is unreachable / NVML init failed / call went to a backend + that doesn't surface power readings. + +Each call record carries a `measured: bool` flag indicating which path +was used, so the UI can disclose. `summarize()` aggregates total Wh, +total tokens, by-kind and by-hardware splits — no cloud comparison. + +Thread propagation +------------------ +The tracker is held in a thread-local. The dispatch layer +(web/main.py) installs one per request; `app/fsm.py:iter_steps` +captures and re-installs it on the FSM runner thread (mirroring the +existing `_captured_token_cb` pattern). Worker threads spawned inside +specialists (prithvi_live, eo_chip_cache) inherit nothing — those calls +are silently dropped, which is acceptable: those specialists do <1 s of +inference each and are off the hot path for the energy story. +""" +from __future__ import annotations + +import threading +from typing import Any + +# (label, fallback_sustained_power_w, source). Used only when the +# proxy doesn't surface a real measurement (NVML disabled, backend +# unreachable, local-fallback path). The fallback figure is a +# conservative public-record estimate; the `measured: bool` flag on +# each call record indicates whether the row used the fallback. +HARDWARE: dict[str, tuple[str, float, str]] = { + "nvidia_l4": ( + "NVIDIA L4", + 60.0, + "NVIDIA L4 Tensor Core GPU data sheet (72 W TGP, Ada Lovelace, " + "24 GB); ~60 W sustained during transformer inference. The " + "active backend for the Riprap inference Space " + "(msradam/riprap-vllm). When the proxy is reachable and NVML " + "is initialized, real per-call power is read off the device " + "via nvmlDeviceGetPowerUsage and this fallback is unused.", + ), + "amd_mi300x": ( + "AMD MI300X", + 600.0, + "AMD Instinct MI300X data sheet (750 W TDP); ~600 W sustained " + "during vLLM generation. Selected only when an operator deploys " + "against an MI300X droplet and sets RIPRAP_HARDWARE_LABEL=AMD " + "MI300X explicitly. The hackathon submission used to run on " + "this hardware; the droplet was decommissioned 2026-05-06.", + ), + "nvidia_t4": ( + "NVIDIA T4", + 50.0, + "NVIDIA T4 data sheet (70 W max); ~50 W sustained during " + "transformer inference.", + ), + "apple_m": ( + "Apple M-series", + 20.0, + "ml.energy / community measurements: ~20 W package power " + "during Granite 4.1 q4_K_M inference on Apple M3/M4 (the " + "local-dev path, no remote backend configured).", + ), + "cpu_server": ( + "x86 CPU", + 30.0, + "Typical sustained x86 server-core load (~30 W) for CPU-only " + "inference fallbacks.", + ), +} + + +def _wh(power_w: float, duration_s: float) -> float: + return power_w * max(duration_s, 0.0) / 3600.0 + + +class Tracker: + """Append-only call ledger for one query. Thread-safe.""" + + def __init__(self) -> None: + self.calls: list[dict[str, Any]] = [] + self._lock = threading.Lock() + + def _record(self, *, base: dict[str, Any], hardware: str, + duration_s: float, + joules_real: float | None, + power_w_real: float | None) -> None: + """Shared body of record_llm / record_ml. 
+ + When `joules_real` is provided (NVML-derived from the proxy), + we use it directly and stamp `measured=True`. Otherwise we + fall back to the data-sheet sustained-power estimate. + """ + hw_label, fallback_w, _src = HARDWARE.get(hardware, + HARDWARE["cpu_server"]) + if joules_real is not None and joules_real >= 0: + joules = float(joules_real) + wh = joules / 3600.0 + measured = True + avg_w = (joules / duration_s) if duration_s > 0 else ( + power_w_real if power_w_real is not None else fallback_w) + else: + avg_w = fallback_w + wh = _wh(avg_w, duration_s) + joules = wh * 3600.0 + measured = False + record = { + **base, + "hardware": hardware, + "hardware_label": hw_label, + "power_w": round(avg_w, 2), + "duration_s": round(duration_s, 3), + "measured": measured, + "wh": round(wh, 5), + "joules": round(joules, 3), + } + with self._lock: + self.calls.append(record) + + def record_llm(self, *, model: str, backend: str, hardware: str, + prompt_tokens: int | None, + completion_tokens: int | None, + duration_s: float, + stream: bool = False, + joules_real: float | None = None, + power_w_real: float | None = None) -> None: + total = None + if prompt_tokens is not None or completion_tokens is not None: + total = (prompt_tokens or 0) + (completion_tokens or 0) + self._record( + base={ + "kind": "llm", + "model": model, + "backend": backend, + "prompt_tokens": prompt_tokens, + "completion_tokens": completion_tokens, + "total_tokens": total, + "stream": stream, + }, + hardware=hardware, + duration_s=duration_s, + joules_real=joules_real, + power_w_real=power_w_real, + ) + + def record_ml(self, *, endpoint: str, backend: str, hardware: str, + duration_s: float, + joules_real: float | None = None, + power_w_real: float | None = None) -> None: + self._record( + base={ + "kind": "ml", + "endpoint": endpoint, + "backend": backend, + }, + hardware=hardware, + duration_s=duration_s, + joules_real=joules_real, + power_w_real=power_w_real, + ) + + def summarize(self) -> dict[str, Any]: + with self._lock: + calls = list(self.calls) + total_wh = sum(c["wh"] for c in calls) + total_dur = sum(c["duration_s"] for c in calls) + n_measured = sum(1 for c in calls if c.get("measured")) + prompt = sum((c.get("prompt_tokens") or 0) + for c in calls if c["kind"] == "llm") + completion = sum((c.get("completion_tokens") or 0) + for c in calls if c["kind"] == "llm") + + by_kind: dict[str, dict[str, Any]] = {} + for c in calls: + slot = by_kind.setdefault(c["kind"], {"wh": 0.0, "n": 0, + "duration_s": 0.0}) + slot["wh"] += c["wh"] + slot["n"] += 1 + slot["duration_s"] += c["duration_s"] + for slot in by_kind.values(): + slot["wh"] = round(slot["wh"], 5) + slot["mwh"] = round(slot["wh"] * 1000, 2) + slot["duration_s"] = round(slot["duration_s"], 3) + + by_hw: dict[str, dict[str, Any]] = {} + for c in calls: + slot = by_hw.setdefault(c["hardware"], { + "label": c["hardware_label"], + "wh": 0.0, "n": 0, "duration_s": 0.0, + }) + slot["wh"] += c["wh"] + slot["n"] += 1 + slot["duration_s"] += c["duration_s"] + for slot in by_hw.values(): + slot["wh"] = round(slot["wh"], 5) + slot["mwh"] = round(slot["wh"] * 1000, 2) + slot["duration_s"] = round(slot["duration_s"], 3) + + return { + "n_calls": len(calls), + "n_measured": n_measured, + "total_wh": round(total_wh, 5), + "total_mwh": round(total_wh * 1000, 2), + "total_joules": round(total_wh * 3600, 1), + "total_duration_s": round(total_dur, 3), + "tokens": { + "prompt": prompt or None, + "completion": completion or None, + "total": (prompt + completion) or None, + }, + 
"by_kind": by_kind, + "by_hardware": by_hw, + "calls": calls, + "method": ( + "Energy is read off the L4 GPU per call via " + "nvmlDeviceGetPowerUsage on the inference proxy " + "(X-GPU-Energy-J response header). Calls flagged " + "measured=false fall back to " + "(data-sheet sustained_power_w × duration_s ÷ 3600) " + "— see app/emissions.HARDWARE for sources. Tokens " + "are reported by the backend (LiteLLM usage) when " + "available, else estimated from response text length " + "(~4 chars/token)." + ), + } + + +# Thread-local install. Calls made on threads without an installed +# tracker hit a no-op stub — always safe to call active().record_*(). +_tl = threading.local() + + +class _NullTracker: + def record_llm(self, **_kw: Any) -> None: + return None + + def record_ml(self, **_kw: Any) -> None: + return None + + +_NULL = _NullTracker() + + +def install(tracker: Tracker | None) -> None: + _tl.tracker = tracker + + +def current() -> Tracker | None: + return getattr(_tl, "tracker", None) + + +def active() -> Tracker | _NullTracker: + """Return the installed tracker for this thread, or a no-op stub. + Always safe to call in instrumentation hot paths.""" + return getattr(_tl, "tracker", None) or _NULL + + +def estimate_completion_tokens(text: str) -> int: + """Rough char/4 estimator used when the backend doesn't report usage + (e.g. streaming through Ollama, where LiteLLM's stream wrapper does + not always surface a final usage block).""" + return max(1, len(text) // 4) diff --git a/app/energy.py b/app/energy.py new file mode 100644 index 0000000000000000000000000000000000000000..0805e2131b30d647044ecfe537d9b6af49f29511 --- /dev/null +++ b/app/energy.py @@ -0,0 +1,56 @@ +"""Per-query energy footprint estimate. + +Conservative, defensible numbers — no overclaim. We measure local +inference time and apply a published-range package-power figure for +Apple-Silicon LLM inference; we compare to the most recent published +estimate of frontier-cloud per-query energy (Epoch AI, 2025). + +This is not a benchmark — it's a transparent rule-of-thumb that the +user can audit. The system prompt and the UI both surface the +underlying numbers and the citation. +""" +from __future__ import annotations + +# Local: Granite 4.1:3b on Apple M-series (M3/M4 Pro range) +# Sustained package power during ~5 s of LLM inference, q4_K_M quant. +# Source: ml.energy + community measurements; conservative midpoint. +LOCAL_PACKAGE_POWER_W = 20.0 + +# Frontier cloud per-query inference energy. +# Source: Epoch AI, "How much energy does ChatGPT use?" (2025). +# https://epoch.ai/gradient-updates/how-much-energy-does-chatgpt-use +# This is a typical-query estimate for GPT-4o-class inference; long-context +# queries scale roughly linearly with token count. +CLOUD_PER_QUERY_WH = 0.30 + +# Citation strings used in the UI. +LOCAL_SOURCE = ("ml.energy / community measurements; ~20 W package power " + "during Granite 4.1:3b q4_K_M inference on Apple M-series.") +CLOUD_SOURCE = ('Epoch AI (2025), "How much energy does ChatGPT use?", ' + "estimating ~0.3 Wh per typical GPT-4o query.") + + +def estimate(reconcile_seconds: float, total_seconds: float | None = None) -> dict: + """Return a per-query energy estimate. + + Args: + reconcile_seconds: wallclock of the Granite reconcile step (the + only step that meaningfully draws CPU/GPU power). + total_seconds: optional full-FSM wallclock for context. 
+        """
+    local_wh = LOCAL_PACKAGE_POWER_W * reconcile_seconds / 3600.0
+    return {
+        "local_wh": round(local_wh, 4),
+        "local_mwh": round(local_wh * 1000, 1),
+        "cloud_wh": CLOUD_PER_QUERY_WH,
+        "cloud_mwh": round(CLOUD_PER_QUERY_WH * 1000, 1),
+        "ratio_cloud_over_local": round(CLOUD_PER_QUERY_WH / local_wh, 1) if local_wh > 0 else None,
+        "method": {
+            "local": f"{LOCAL_PACKAGE_POWER_W} W × {reconcile_seconds:.2f} s ÷ 3600",
+            "local_source": LOCAL_SOURCE,
+            "cloud": f"{CLOUD_PER_QUERY_WH} Wh per query (published estimate)",
+            "cloud_source": CLOUD_SOURCE,
+        },
+        "reconcile_seconds": round(reconcile_seconds, 2),
+        "total_seconds": round(total_seconds, 2) if total_seconds is not None else None,
+    }
diff --git a/app/flood_layers/__init__.py b/app/flood_layers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/app/flood_layers/dep_stormwater.py b/app/flood_layers/dep_stormwater.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f89ef928f325d80e53a56eae12b454501592415
--- /dev/null
+++ b/app/flood_layers/dep_stormwater.py
@@ -0,0 +1,168 @@
+"""NYC DEP Stormwater Flood Maps — pluvial scenarios.
+
+Three scenarios, all in EPSG:2263. Polygons are categorized by depth class:
+    1 = Nuisance Flooding (>4" and ≤1 ft)
+    2 = Deep and Contiguous Flooding (>1 ft and ≤4 ft)
+    3 = Deep Contiguous Flooding (>4 ft)
+
+Two query paths exist:
+    join_raster(point) — fast path. Samples the baked GeoTIFFs in
+        data/baked/. ~3 ms per scenario, ~70 ms cold-open. Used by
+        step_dep in the FSM.
+    join(assets) — legacy GDB path via gpd.sjoin. Retained as
+        a fallback when baked rasters are absent (local dev) and as
+        the polygon-overlap path used by coverage_for_polygon for
+        neighborhood mode.
+"""
+from __future__ import annotations
+
+import logging
+import threading
+from functools import lru_cache
+
+import geopandas as gpd
+
+from app.spatial import DATA, NYC_CRS
+
+log = logging.getLogger(__name__)
+BAKED = DATA / "baked"
+_TLOCAL = threading.local()
+_FALLBACK_WARNED = False
+
+ROOT = DATA / "dep"
+
+SCENARIOS = {
+    "dep_extreme_2080": {
+        "gdb": "dep_extreme_2080.gdb",
+        "label": "DEP Extreme Stormwater (3.66 in/hr, 2080 SLR)",
+    },
+    "dep_moderate_2050": {
+        "gdb": "dep_moderate_2050.gdb",
+        "label": "DEP Moderate Stormwater (2.13 in/hr, 2050 SLR)",
+    },
+    "dep_moderate_current": {
+        "gdb": "dep_moderate_current.gdb",
+        "label": "DEP Moderate Stormwater (2.13 in/hr, current SLR)",
+    },
+}
+
+DEPTH_CLASS = {
+    1: "Nuisance (>4 in to 1 ft)",
+    2: "Deep & Contiguous (1-4 ft)",
+    3: "Deep Contiguous (>4 ft)",
+}
+
+
+@lru_cache(maxsize=4)
+def load(scenario: str) -> gpd.GeoDataFrame:
+    s = SCENARIOS[scenario]
+    path = ROOT / s["gdb"]
+    g = gpd.read_file(str(path))
+    if g.crs.to_string() != NYC_CRS:
+        g = g.to_crs(NYC_CRS)
+    return g
+
+
+def join(assets: gpd.GeoDataFrame, scenario: str) -> gpd.GeoDataFrame:
+    """Per-asset depth class, or 0 if outside scenario.
+
+    Returns a frame indexed like assets with columns: depth_class, depth_label.
+    Higher class wins on overlap.
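+
+    Illustrative call (a sketch; `assets` is any GeoDataFrame already in
+    EPSG:2263):
+
+        >>> hits = join(assets, "dep_moderate_current")   # doctest: +SKIP
+        >>> list(hits.columns)                            # doctest: +SKIP
+        ['depth_class', 'depth_label']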
+ """ + z = load(scenario) + a = assets[["geometry"]].copy() + a["_aid"] = range(len(a)) + j = gpd.sjoin(a, z[["Flooding_Category", "geometry"]], + how="left", predicate="intersects") + # for each asset, take max category hit (3 dominates 1) + cat = (j.groupby("_aid")["Flooding_Category"].max() + .reindex(range(len(a))) + .fillna(0).astype(int)) + out = a[["_aid"]].copy() + out["depth_class"] = cat.values + out["depth_label"] = out["depth_class"].map(lambda c: DEPTH_CLASS.get(c, "outside")) + return out[["depth_class", "depth_label"]].reset_index(drop=True) + + +def label(scenario: str) -> str: + return SCENARIOS[scenario]["label"] + + +def _raster_handles(): + """Per-thread rasterio handle cache. rasterio.DatasetReader is not + safe to share across threads for concurrent .sample() calls; the + FSM runs each request on its own executor thread, so we keep one + handle set per thread.""" + h = getattr(_TLOCAL, "handles", None) + if h is not None: + return h + import rasterio + h = {} + for s in SCENARIOS: + p = BAKED / f"{s}.tif" + if not p.exists(): + return None + h[s] = rasterio.open(str(p)) + _TLOCAL.handles = h + return h + + +def join_raster(pt_geom_2263, scenario: str) -> int: + """Fast path. Returns the integer depth class (0=outside, 1/2/3) for a + single shapely Point in EPSG:2263. Falls back to the GDB join() path + if baked rasters are missing — emits a one-time warning so local dev + still works without the bake artifacts.""" + global _FALLBACK_WARNED + h = _raster_handles() + if h is None: + if not _FALLBACK_WARNED: + log.warning( + "data/baked/dep_*.tif not found — falling back to GDB sjoin. " + "Run: uv run python scripts/bake_cornerstone_rasters.py" + ) + _FALLBACK_WARNED = True + # legacy fallback — wrap point in a one-row GeoDataFrame + a = gpd.GeoDataFrame(geometry=[pt_geom_2263], crs=NYC_CRS) + return int(join(a, scenario).iloc[0]["depth_class"]) + ds = h[scenario] + v = next(ds.sample([(pt_geom_2263.x, pt_geom_2263.y)])) + return int(v[0]) + + +def coverage_for_polygon(polygon, scenario: str, + polygon_crs: str = "EPSG:4326") -> dict: + """Polygon-level summary: what fraction of the input polygon falls into + each depth class for a given DEP scenario? Used in neighborhood mode. 
+ + Returns: + { + 'scenario': scenario id, + 'label': human-readable scenario name, + 'fraction_any': fraction of polygon inside any flooded class, + 'fraction_class': {1: f, 2: f, 3: f} fraction in each class, + 'polygon_area_m2': total polygon area, + } + """ + z = load(scenario) + poly_gdf = gpd.GeoDataFrame(geometry=[polygon], crs=polygon_crs).to_crs(NYC_CRS) + poly_geom = poly_gdf.iloc[0].geometry + poly_ft2 = float(poly_geom.area) + sqft_to_m2 = 0.092903 + fraction_class = {1: 0.0, 2: 0.0, 3: 0.0} + if poly_ft2: + for cat in (1, 2, 3): + sub = z[z["Flooding_Category"] == cat] + if sub.empty: + continue + inter = sub.geometry.intersection(poly_geom) + inter = inter[~inter.is_empty] + ft2 = float(inter.area.sum()) if len(inter) else 0.0 + fraction_class[cat] = round(ft2 / poly_ft2, 4) + fraction_any = round(sum(fraction_class.values()), 4) + return { + "scenario": scenario, + "label": label(scenario), + "fraction_any": fraction_any, + "fraction_class": fraction_class, + "polygon_area_m2": round(poly_ft2 * sqft_to_m2, 1), + } diff --git a/app/flood_layers/ida_hwm.py b/app/flood_layers/ida_hwm.py new file mode 100644 index 0000000000000000000000000000000000000000..b7e802f0c791840860a657cd2b939e63487df80d --- /dev/null +++ b/app/flood_layers/ida_hwm.py @@ -0,0 +1,96 @@ +"""Hurricane Ida (Sept 2021) empirical flood extent — USGS high-water marks. + +This specialist plays the same role as Prithvi-EO 2.0 (Sen1Floods11) +in the parent triangulation-engine: it provides empirical post-event +flood evidence (versus the modeled scenarios from FEMA/DEP). Where +Prithvi derives extent from Sentinel-1 SAR, USGS HWMs are surveyed +ground-truth water marks. Both are valid empirical signals; HWMs +are the public record for Ida specifically. + +Output per address: number of HWMs within radius, max water elevation +(ft), nearest site description. 
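+
+Illustrative usage (a sketch; the coordinates are hypothetical and the
+function returns None when the GeoJSON asset is missing):
+
+    >>> s = summary_for_point(40.675, -74.010, radius_m=500)   # doctest: +SKIP
+    >>> (s.n_within_radius, s.max_elev_ft) if s else None      # doctest: +SKIP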
+""" +from __future__ import annotations + +import json +import math +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +DATA = Path(__file__).resolve().parent.parent.parent / "data" / "ida_2021_hwms_ny.geojson" +DOC_ID = "ida_hwm" +CITATION = "USGS STN Hurricane Ida 2021 high-water marks (Event 312, NY)" + + +@dataclass +class HWMSummary: + n_within_radius: int + radius_m: int + max_elev_ft: float | None + max_height_above_gnd_ft: float | None + nearest_dist_m: float | None + nearest_site: str | None + nearest_elev_ft: float | None + sample_sites: list[str] + points: list[dict] | None = None # per-mark for the map layer + + +def _haversine_m(lat1, lon1, lat2, lon2): + R = 6371000.0 + p1, p2 = math.radians(lat1), math.radians(lat2) + dp = math.radians(lat2 - lat1); dl = math.radians(lon2 - lon1) + a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2 + return 2 * R * math.asin(math.sqrt(a)) + + +@lru_cache(maxsize=1) +def _load() -> list[dict]: + if not DATA.exists(): + return [] + with open(DATA) as f: + return json.load(f).get("features", []) + + +def summary_for_point(lat: float, lon: float, radius_m: int = 1000) -> HWMSummary | None: + feats = _load() + if not feats: + return None + in_radius = [] + nearest = (None, float("inf"), None) + for f in feats: + flon, flat = f["geometry"]["coordinates"] + d = _haversine_m(lat, lon, flat, flon) + if d <= radius_m: + in_radius.append((d, f)) + if d < nearest[1]: + nearest = (f, d, None) + nf, nd, _ = nearest + elevs = [f["properties"].get("elev_ft") for _, f in in_radius + if f["properties"].get("elev_ft") is not None] + heights = [f["properties"].get("height_above_gnd") for _, f in in_radius + if f["properties"].get("height_above_gnd") is not None] + sites = [f["properties"].get("site_description") for _, f in in_radius] + sites = [s for s in sites if s][:5] + points = [] + for d, f in in_radius[:50]: # cap so SSE payload stays small + flon, flat = f["geometry"]["coordinates"] + p = f["properties"] + points.append({ + "lat": flat, "lon": flon, + "site": p.get("site_description"), + "elev_ft": p.get("elev_ft"), + "height_above_gnd_ft": p.get("height_above_gnd"), + "distance_m": round(d, 1), + }) + return HWMSummary( + n_within_radius=len(in_radius), + radius_m=radius_m, + max_elev_ft=round(max(elevs), 2) if elevs else None, + max_height_above_gnd_ft=round(max(heights), 2) if heights else None, + nearest_dist_m=round(nd, 0) if nf is not None else None, + nearest_site=nf["properties"].get("site_description") if nf else None, + nearest_elev_ft=nf["properties"].get("elev_ft") if nf else None, + sample_sites=sites, + points=points, + ) diff --git a/app/flood_layers/prithvi_live.py b/app/flood_layers/prithvi_live.py new file mode 100644 index 0000000000000000000000000000000000000000..533663031f9bf9713a0b2a9899fdb81c6031908d --- /dev/null +++ b/app/flood_layers/prithvi_live.py @@ -0,0 +1,563 @@ +"""Prithvi-EO 2.0 (NYC Pluvial v2 fine-tune) live water segmentation. + +A per-query specialist: pulls the most recent low-cloud Sentinel-2 L2A +scene over the address from Microsoft Planetary Computer, runs the +NYC-specialized fine-tune, and reports % water within 500 m. + +Distinct from `app/flood_layers/prithvi_water.py`, which serves the +offline-precomputed 2021 Ida polygons. 
This one is *fresh observation*
+each query — same doc_id (`prithvi_live`), but the underlying model
+has been swapped from the Sen1Floods11 base to
+`msradam/Prithvi-EO-2.0-NYC-Pluvial` (Apache-2.0, fine-tuned on AMD
+Instinct MI300X via AMD Developer Cloud — test flood IoU 0.5979,
+6× over the base). The base model is still loadable by setting
+RIPRAP_PRITHVI_LIVE_REPO to the IBM repo as a fallback.
+
+Network calls (STAC search + COG band reads) and a 300M-param model
+forward pass make this the slowest specialist after the LLM. Gated by
+RIPRAP_PRITHVI_LIVE_ENABLE so deployments without the deps installed
+silently skip it. Scenes at or above 30 % cloud cover are refused, to
+honor the Sen1Floods11 training distribution.
+
+License: Apache-2.0. See experiments/shared/licenses.md.
+"""
+
+from __future__ import annotations
+
+import concurrent.futures
+import logging
+import os
+import threading
+import time
+from typing import Any
+
+log = logging.getLogger("riprap.prithvi_live")
+
+ENABLE = os.environ.get("RIPRAP_PRITHVI_LIVE_ENABLE", "1").lower() in ("1", "true", "yes")
+SEARCH_DAYS = int(os.environ.get("RIPRAP_PRITHVI_LIVE_SEARCH_DAYS", "120"))
+MAX_CLOUD_PCT = float(os.environ.get("RIPRAP_PRITHVI_LIVE_MAX_CLOUD", "30"))
+DEVICE = os.environ.get("RIPRAP_PRITHVI_LIVE_DEVICE", "cpu")
+
+# Default to the NYC Pluvial v2 fine-tune; override to the IBM-NASA base
+# (`ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL-Sen1Floods11`) when the v2
+# artifact is unreachable or for A/B comparisons.
+REPO = os.environ.get(
+    "RIPRAP_PRITHVI_LIVE_REPO",
+    "msradam/Prithvi-EO-2.0-NYC-Pluvial",
+)
+BASE_REPO = "ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL-Sen1Floods11"
+
+# Sen1Floods11 expects 6 bands in this exact order.
+BANDS = ["B02", "B03", "B04", "B8A", "B11", "B12"]
+IMG_SIZE = 512  # Sen1Floods11 training crop
+CHIP_PX = 1024
+CHIP_M = CHIP_PX * 10
+HALF_M = CHIP_M / 2
+CENTER_RADIUS_M = 500
+PIXEL_M = 10
+
+_MODEL = None
+_RUN_MODEL = None
+_INIT_LOCK = threading.Lock()  # serializes lazy load if multiple threads
+                               # hit fetch() before _MODEL is populated
+
+
+def _has_required_deps() -> tuple[bool, str | None]:
+    """Probe deps in two tiers.
+
+    Tier 1 — chip fetching (planetary_computer / pystac_client / rioxarray
+    / xarray / einops) is always required: prithvi_live always pulls a
+    Sentinel-2 chip from Microsoft Planetary Computer regardless of where
+    inference runs.
+
+    Tier 2 — local inference (terratorch) is only required when remote
+    inference is unavailable. On the HF Space we have remote inference
+    on the AMD MI300X via app/inference.py, so terratorch is not needed
+    even though chip-fetch is.
+
+    Returns (False, missing) if any required dep is missing. Splitting
+    the gate this way lets the HF Space deployment fetch chips and run
+    remote inference even though it doesn't fit terratorch's transitive
+    dep cone (~250 MB) in the HF build sandbox."""
+    chip_deps = ("planetary_computer", "pystac_client",
+                 "rioxarray", "xarray", "einops")
+    missing = [n for n in chip_deps
+               if not _has_module(n)]
+    if missing:
+        return False, ", ".join(missing)
+    # Tier 2: only need terratorch if we'd run inference locally.
+    try:
+        from app import inference as _inf
+        if _inf.remote_enabled():
+            return True, None
+    except Exception:
+        pass
+    if not _has_module("terratorch"):
+        return False, "terratorch (local inference)"
+    return True, None
+
+
+def _has_module(name: str) -> bool:
+    """True if `name` imports cleanly. ImportError → not installed.
+    Other exceptions (e.g.
torchvision::nms RuntimeError on the HF + Space) → treat as unavailable too; we don't want a clean-skip + intent to crash the FSM at deps-probe time.""" + try: + __import__(name) + return True + except ImportError: + return False + except Exception as e: + log.warning("prithvi_live: %s import raised %s; treating as " + "unavailable", name, type(e).__name__) + return False + + +_DEPS_OK, _DEPS_MISSING = _has_required_deps() + + +def warm(): + """Optional pre-load. The FSM action is lazy too — calling warm() + here just amortizes the first-query cost at app boot.""" + if not ENABLE: + return + try: + _ensure_model() + except Exception: + log.exception("prithvi_live: warm() failed; specialist will no-op") + + +def _ensure_model(): + """Load Prithvi-EO 2.0 once into RAM. + + The v2 NYC Pluvial fine-tune (`msradam/Prithvi-EO-2.0-NYC-Pluvial`) + is **architecturally distinct** from the IBM-NASA Sen1Floods11 + base: v2 ships a `UNetDecoder` + 2-class head, the base ships a + UperNet with PSP / FPN. The model has to be built from each + repo's own config.yaml — there's no key-mapping shim that bridges + them. + + Strategy: + + 1. If the active REPO != BASE_REPO, try to build from the v2 + yaml + v2 ckpt. The v2 yaml's data: paths point at the + training droplet's filesystem (`/root/terramind_nyc/...`) + which doesn't exist locally; that's fine — the + GenericNonGeoSegmentationDataModule constructor only + records the paths, splits aren't read until `setup()`. + 2. On any v2 failure (yaml not present, datamodule constructor + strict, weights mismatch), fall back to the base yaml + base + ckpt. The base path is the proven pre-C5 behaviour. + + The shared `inference.run_model` helper is only published by the + IBM-NASA base repo; we always pull it from there. + """ + global _MODEL, _RUN_MODEL + if _MODEL is not None: + return _MODEL, _RUN_MODEL + with _INIT_LOCK: + if _MODEL is not None: # double-check inside the lock + return _MODEL, _RUN_MODEL + import importlib.util + + from huggingface_hub import hf_hub_download + from terratorch.cli_tools import LightningInferenceModel + log.info("prithvi_live: loading model from %s", REPO) + + # Inference helper only lives in the IBM-NASA base repo. + inference_py = hf_hub_download(BASE_REPO, "inference.py") + + m = None + # ---- v2 path: yaml + ckpt from the published repo ---------- + if REPO != BASE_REPO: + try: + # The v2 repo publishes `prithvi_nyc_phase14.yaml` and + # `prithvi_nyc_pluvial_v2.ckpt`. Be tolerant of small + # naming drift (best_val_loss.ckpt etc.) by probing. + v2_yaml = None + for name in ("prithvi_nyc_phase14.yaml", + "config.yaml", "phase14.yaml", + "prithvi_nyc_v2.yaml"): + try: + v2_yaml = hf_hub_download(REPO, name) + break + except Exception: + continue + v2_ckpt = None + for name in ("prithvi_nyc_pluvial_v2.ckpt", + "best_val_loss.ckpt", "model.ckpt", + "last.ckpt"): + try: + v2_ckpt = hf_hub_download(REPO, name) + break + except Exception: + continue + if v2_yaml and v2_ckpt: + log.info("prithvi_live: building v2 model from " + "yaml=%s ckpt=%s", v2_yaml, v2_ckpt) + m = LightningInferenceModel.from_config(v2_yaml, v2_ckpt) + # prithvi_nyc_phase14.yaml uses GenericNonGeoSegmentationDataModule + # which omits test_transform (→ None) and uses terratorch Normalize + # for aug (only handles 4D/5D). IBM inference.py:run_model() calls + # both on a 3D dict. Patch both to match the IBM base contract: + # ToTensorV2 for test_transform; Kornia AugmentationSequential + # (accepts dict input, adds batch dim) for aug. 
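+                    # Minimal sketch of the contract being restored
+                    # (inferred from the IBM inference.py call sites
+                    # quoted below; an assumption, not a verified
+                    # API): for one (6, H, W) sample,
+                    #     t = dm.test_transform(image=chip)["image"]
+                    #     t = dm.aug({"image": t})["image"]
+                    # both calls must round-trip a single un-batched
+                    # sample without raising.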
+ if getattr(getattr(m, 'datamodule', None), + 'test_transform', None) is None: + import albumentations as A + import torch as _torch + from albumentations.pytorch import ToTensorV2 + m.datamodule.test_transform = A.Compose([ToTensorV2()]) + _old = m.datamodule.aug + + # IBM's inference.py:188 calls + # `datamodule.aug({'image': tensor})['image']`. + # kornia's AugmentationSequential doesn't accept + # dict input cleanly and tripped the + # `'list' object has no attribute 'view'` + # error on the L4 deploy. Use a hand-rolled + # dict-aware normalizer instead — same math, + # fewer moving parts, no kornia version skew. + class _DictNormalize: + def __init__(self, mean, std): + self.mean = _torch.as_tensor(mean).view(-1, 1, 1).float() + self.std = _torch.as_tensor(std).view(-1, 1, 1).float() + + def __call__(self, sample): + if isinstance(sample, dict): + img = sample["image"] + mean = self.mean.to(img.device) + std = self.std.to(img.device) + return {**sample, "image": (img - mean) / std} + mean = self.mean.to(sample.device) + std = self.std.to(sample.device) + return (sample - mean) / std + + # `_old.means` / `_old.stds` come from the + # yaml as Python lists — calling `.view()` on + # them is what tripped the original + # `'list' object has no attribute 'view'`. + # _DictNormalize handles the conversion via + # torch.as_tensor internally; just pass the + # raw values whatever their type. + m.datamodule.aug = _DictNormalize(_old.means, _old.stds) + log.info("prithvi_live: patched v2 datamodule transforms " + "for IBM inference.py compat (dict-aware Normalize)") + else: + log.warning("prithvi_live: v2 yaml/ckpt not " + "discoverable in %s; falling back to base", + REPO) + except Exception as e: + log.warning("prithvi_live: v2 build failed (%s); " + "falling back to base", e) + m = None + + # ---- base path: proven IBM-NASA Sen1Floods11 fine-tune ----- + if m is None: + base_config = hf_hub_download(BASE_REPO, "config.yaml") + base_ckpt = hf_hub_download( + BASE_REPO, "Prithvi-EO-V2-300M-TL-Sen1Floods11.pt") + m = LightningInferenceModel.from_config(base_config, base_ckpt) + + m.model.eval() + if DEVICE == "cuda": + try: + import torch + if torch.cuda.is_available(): + m.model.cuda() + except Exception: + log.exception("prithvi_live: cuda move failed") + + spec = importlib.util.spec_from_file_location("_prithvi_inference", + inference_py) + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + _MODEL = m + _RUN_MODEL = mod.run_model + return _MODEL, _RUN_MODEL + + +def _search_recent_scene(lat: float, lon: float): + """Most recent low-cloud S2 L2A item near (lat, lon) in the last + SEARCH_DAYS days, or None.""" + import datetime as dt + + import planetary_computer as pc + from pystac_client import Client + end = dt.datetime.utcnow().date() + start = end - dt.timedelta(days=SEARCH_DAYS) + client = Client.open( + "https://planetarycomputer.microsoft.com/api/stac/v1", + modifier=pc.sign_inplace, + ) + delta = 0.02 + search = client.search( + collections=["sentinel-2-l2a"], + bbox=[lon - delta, lat - delta, lon + delta, lat + delta], + datetime=f"{start}/{end}", + query={"eo:cloud_cover": {"lt": MAX_CLOUD_PCT}}, + max_items=20, + ) + items = sorted( + search.items(), + key=lambda it: (it.properties.get("eo:cloud_cover", 100), + -(it.datetime.timestamp() if it.datetime else 0)), + ) + return items[0] if items else None + + +def _build_chip(item, lat: float, lon: float): + """Returns (img, ref_da, epsg) — img is the (6, H, W) center-cropped + float32 array; ref_da is the 
rioxarray DataArray of the reference + band BEFORE the center crop (kept so we can compute the affine + transform for polygonization in EPSG:4326).""" + import numpy as np + import rioxarray # noqa: F401 + import xarray as xr + from pyproj import Transformer + if "proj:epsg" in item.properties: + epsg = int(item.properties["proj:epsg"]) + else: + code = item.properties.get("proj:code", "") + if code.startswith("EPSG:"): + epsg = int(code.split(":", 1)[1]) + else: + raise RuntimeError("STAC item missing proj:epsg / proj:code") + fwd = Transformer.from_crs("EPSG:4326", f"EPSG:{epsg}", always_xy=True) + cx, cy = fwd.transform(lon, lat) + xmin, xmax = cx - HALF_M, cx + HALF_M + ymin, ymax = cy - HALF_M, cy + HALF_M + ref = rioxarray.open_rasterio(item.assets[BANDS[0]].href, masked=False).squeeze(drop=True) + ref = ref.rio.clip_box(minx=xmin, miny=ymin, maxx=xmax, maxy=ymax) + ref = ref.isel(y=slice(0, CHIP_PX), x=slice(0, CHIP_PX)) + arrs = [ref.astype("float32")] + for b in BANDS[1:]: + da = rioxarray.open_rasterio(item.assets[b].href, masked=False).squeeze(drop=True) + da = da.rio.clip_box(minx=xmin, miny=ymin, maxx=xmax, maxy=ymax) + if da.shape != ref.shape: + da = da.rio.reproject_match(ref) + arrs.append(da.astype("float32")) + stacked = xr.concat(arrs, dim="band", join="override").assign_coords(band=BANDS) + img = stacked.values # (6, H, W) + # Center crop to IMG_SIZE x IMG_SIZE. + _, h, w = img.shape + sy, sx = (h - IMG_SIZE) // 2, (w - IMG_SIZE) // 2 + img = img[:, sy:sy + IMG_SIZE, sx:sx + IMG_SIZE] + if img.mean() > 1: + img = img / 10000.0 + return np.nan_to_num(img.astype("float32")), ref, epsg + + +def _polygonize_mask(pred, ref_da, epsg: int) -> dict | None: + """Vectorize the binary water mask into an EPSG:4326 GeoJSON + FeatureCollection so the frontend can paint it on the MapLibre + map. Returns None on failure (best-effort — never raises into the + caller path).""" + try: + import json + + import geopandas as gpd + from rasterio.features import shapes + from rasterio.transform import from_origin + from shapely.geometry import shape + # Reconstruct the affine transform of the center-cropped pred. + # ref_da has 1024 px at 10 m; we cropped to the central 512. + xs = ref_da.x.values + ys = ref_da.y.values + if len(xs) < IMG_SIZE or len(ys) < IMG_SIZE: + return None + # rioxarray gives pixel-centered coords; offset by half a pixel + # to the upper-left to build a from_origin transform. + sy = (len(ys) - IMG_SIZE) // 2 + sx = (len(xs) - IMG_SIZE) // 2 + # ys are descending (top-to-bottom); take the top of the crop. + top_y = float(ys[sy]) + (PIXEL_M / 2.0) + left_x = float(xs[sx]) - (PIXEL_M / 2.0) + transform = from_origin(left_x, top_y, PIXEL_M, PIXEL_M) + # Polygonize only the water class (1). + mask = (pred == 1).astype("uint8") + polys = [] + for geom, value in shapes(mask, mask=mask.astype(bool), + transform=transform): + if value != 1: + continue + polys.append(shape(geom)) + if not polys: + return {"type": "FeatureCollection", "features": []} + gdf = gpd.GeoDataFrame({"geometry": polys}, + crs=f"EPSG:{epsg}").to_crs("EPSG:4326") + # Simplify slightly to keep the SSE payload small (10 m raster + # over 5 km square = up to ~10 k tiny squares; simplification + # collapses adjacent water pixels into smooth polygons). 
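+        # 0.00005° is ≈5.6 m north-south (1° latitude ≈ 111.1 km) and
+        # ≈4.2 m east-west at 40.7° N (scaled by cos 40.7° ≈ 0.76) —
+        # roughly half of PIXEL_M, so with preserve_topology=True no
+        # polygon edge moves by more than about half a pixel.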
+ gdf["geometry"] = gdf.geometry.simplify(0.00005, preserve_topology=True) + return json.loads(gdf.to_json()) + except Exception: + log.exception("prithvi_live: polygonize failed") + return None + + +def _fetch_inner(lat: float, lon: float, timeout_s: float) -> dict[str, Any]: + """Core fetch logic — run inside a bounded thread via fetch().""" + t0 = time.time() + try: + item = _search_recent_scene(lat, lon) + if item is None: + return {"ok": False, "skipped": f"no <{MAX_CLOUD_PCT}% cloud " + f"S2 in last {SEARCH_DAYS}d"} + cc = float(item.properties.get("eo:cloud_cover", -1)) + if time.time() - t0 > timeout_s: + return {"ok": False, "skipped": "stac search exceeded budget"} + img, ref_da, epsg = _build_chip(item, lat, lon) + if time.time() - t0 > timeout_s: + return {"ok": False, "skipped": "chip build exceeded budget"} + + # v0.4.5 — try the MI300X inference service first if configured. + # On RemoteUnreachable (service down / not configured / 5xx) fall + # through to the local terratorch path. When remote is configured + # but returns non-ok we surface that signal directly: the local + # path on this machine has been brittle (v2 datamodule + # `test_transform=None` race), so a configured remote is more + # reliable than the fallback. + remote_attempted = False + try: + from app import inference as _inf + if _inf.remote_enabled(): + remote_attempted = True + remote = _inf.prithvi_pluvial( + img, scene_id=item.id, + scene_datetime=str(item.datetime), + cloud_cover=cc, + timeout=timeout_s, + ) + if remote.get("ok"): + # Vectorize the remote prediction raster so the map + # actually renders the live water polygons. The + # droplet returns `pred_b64` (uint8 binary mask); + # we polygonize against the chip's WGS84 bounds + # which we know locally from `ref_da`. + polys = None + pred_b64 = remote.get("pred_b64") + pred_shape = remote.get("pred_shape") + if pred_b64 and pred_shape: + try: + xs = ref_da.x.values + ys = ref_da.y.values + from pyproj import Transformer + t_inv = Transformer.from_crs( + f"EPSG:{epsg}", "EPSG:4326", + always_xy=True) + minx, maxx = float(xs.min()), float(xs.max()) + miny, maxy = float(ys.min()), float(ys.max()) + minlon, minlat = t_inv.transform(minx, miny) + maxlon, maxlat = t_inv.transform(maxx, maxy) + from app.context._polygonize import ( + polygonize_binary_mask, + ) + polys = polygonize_binary_mask( + pred_b64, pred_shape, + (minlon, minlat, maxlon, maxlat), + label="water", fill_color="#1F77B4", + simplify_tolerance=2e-5, + ) + except Exception: + log.exception("prithvi_live: remote polygonize failed") + polys = None + return { + "ok": True, + "item_id": item.id, + "item_datetime": str(item.datetime), + "cloud_cover": cc, + "pct_water_full": remote.get("pct_water_full"), + "pct_water_within_500m": remote.get("pct_water_within_500m"), + "polygons_geojson": polys, + "compute": f"remote · {remote.get('device', 'gpu')}", + "elapsed_s": round(time.time() - t0, 2), + } + err = (remote.get("err") + or remote.get("error") + or remote.get("skipped") + or "unknown") + return {"ok": False, + "skipped": f"remote prithvi-pluvial non-ok: {err}", + "elapsed_s": round(time.time() - t0, 2)} + except _inf.RemoteUnreachable as e: + log.info("prithvi_live: remote unreachable (%s)", e) + if remote_attempted: + # Don't fall to local — torchvision::nms is broken on the + # CPU-tier UI Spaces and crashes the FSM specialist with + # a confusing RuntimeError. Return a clean skipped row so + # the trace says "remote unreachable" instead. 
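+                # (If remote_attempted is False the raise happened
+                # before any real attempt was made; control falls
+                # through to the local terratorch path below.)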
+ return {"ok": False, + "skipped": f"remote prithvi-pluvial unreachable: {e}", + "elapsed_s": round(time.time() - t0, 2)} + except Exception as e: + log.exception("prithvi_live: remote call failed") + if remote_attempted: + return {"ok": False, + "skipped": f"remote prithvi-pluvial error: " + f"{type(e).__name__}: {e}", + "elapsed_s": round(time.time() - t0, 2)} + + # Local fallback — the path that's been live since v0.4.4. + # Reached only when remote_attempted is False (i.e. remote + # backend not configured at all). + model, run_model = _ensure_model() + x = img[None, :, None, :, :] # (1, 6, 1, H, W) + pred_t = run_model(x, None, None, model.model, model.datamodule, IMG_SIZE) + import numpy as np + pred = pred_t[0].cpu().numpy().astype("uint8") + pct_full = float(100.0 * pred.mean()) + yy, xx = np.indices(pred.shape) + cy, cx = pred.shape[0] // 2, pred.shape[1] // 2 + radius_px = CENTER_RADIUS_M / PIXEL_M + circle = (yy - cy) ** 2 + (xx - cx) ** 2 <= radius_px ** 2 + pct_500 = float(100.0 * pred[circle].mean()) if circle.sum() else 0.0 + polygons_geojson = _polygonize_mask(pred, ref_da, epsg) + return { + "ok": True, + "item_id": item.id, + "item_datetime": str(item.datetime), + "cloud_cover": cc, + "pct_water_full": pct_full, + "pct_water_within_500m": pct_500, + "polygons_geojson": polygons_geojson, + "compute": "local", + "elapsed_s": round(time.time() - t0, 2), + } + except Exception as e: + log.exception("prithvi_live: fetch failed") + return {"ok": False, "err": f"{type(e).__name__}: {e}", + "elapsed_s": round(time.time() - t0, 2)} + + +def fetch(lat: float, lon: float, timeout_s: float = 60.0) -> dict[str, Any]: + """Run the specialist. Wraps _fetch_inner in a bounded thread so that + STAC searches and COG band reads (which lack per-request HTTP timeouts) + cannot hang the FSM indefinitely. + + Returns a dict with at minimum: + { "ok": bool, "skipped": str | None, "item_id": str | None, + "cloud_cover": float | None, "pct_water_within_500m": float | None } + Designed to never raise; failures show up as ok=False with an `err`. + """ + if not ENABLE: + return {"ok": False, "skipped": "RIPRAP_PRITHVI_LIVE_ENABLE=0"} + if not _DEPS_OK: + return {"ok": False, + "skipped": f"deps unavailable on this deployment: " + f"{_DEPS_MISSING}"} + hard_timeout = timeout_s + 15.0 + from app import emissions as _emissions + _parent_tracker = _emissions.current() + with concurrent.futures.ThreadPoolExecutor( + max_workers=1, + initializer=lambda t=_parent_tracker: _emissions.install(t), + ) as pool: + future = pool.submit(_fetch_inner, lat, lon, timeout_s) + try: + return future.result(timeout=hard_timeout) + except concurrent.futures.TimeoutError: + log.warning("prithvi_live: hard timeout after %.0fs (STAC/COG hung)", + hard_timeout) + return {"ok": False, + "skipped": f"prithvi_live timed out after {hard_timeout:.0f}s"} diff --git a/app/flood_layers/prithvi_water.py b/app/flood_layers/prithvi_water.py new file mode 100644 index 0000000000000000000000000000000000000000..f30bc57ded04d8282cbee5a96de25d46cb055e72 --- /dev/null +++ b/app/flood_layers/prithvi_water.py @@ -0,0 +1,120 @@ +"""Prithvi-EO 2.0 (Sen1Floods11) satellite flood inundation specialist. 
+ +The 300M-parameter Prithvi-EO foundation model (NASA/IBM, Apache-2.0) +was run twice offline on Hurricane Ida 2021 pre/post HLS Sentinel-2 +scenes over central NYC: + + pre : HLS.S30.T18TWK.2021237T153809 (2021-08-25, 3% cloud) + post: HLS.S30.T18TWK.2021245T154911 (2021-09-02, 1% cloud, + ~12 hours after peak rainfall) + +The diff (post-water minus pre-water, filtered to ≥3-cell polygons) +isolates surface water present 12 hours after Ida that wasn't present +the prior week — i.e., candidate Ida-attributable inundation. We ship +the resulting polygons as a flood-layer specialist; per query we +compute proximity from the address to the nearest such polygon. + +Honest scope: +- Sub-surface flooding (subway entrances, basement apartments — the + dominant Ida damage mode in NYC) is not visible to optical satellites. +- Pluvial street water had largely drained by the Sep 2 16:02Z pass, + so the residual Prithvi signal mostly captures marsh ponding, + riverside spillover, and low-lying park inundation. +- The model fired on Ida itself (a real flood event), not a synthetic + fallback — that's the architectural value. +""" +from __future__ import annotations + +import json +import math +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +DATA_DIR = Path(__file__).resolve().parent.parent.parent / "data" +DOC_ID = "prithvi_water" +CITATION = ("Prithvi-EO-2.0-300M-TL-Sen1Floods11 (NASA/IBM, Apache-2.0, via " + "TerraTorch). Hurricane Ida pre/post diff: pre HLS T18TWK " + "2021-08-25 (3% cloud), post HLS T18TWK 2021-09-02 (1% cloud, " + "~12h after peak rainfall).") + + +@dataclass +class PrithviSummary: + inside_water_polygon: bool + nearest_distance_m: float | None + n_polygons_within_500m: int + scene_id: str + scene_date: str + + +def _haversine_m(lat1, lon1, lat2, lon2): + R = 6371000.0 + p1, p2 = math.radians(lat1), math.radians(lat2) + dp = math.radians(lat2 - lat1); dl = math.radians(lon2 - lon1) + a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2 + return 2 * R * math.asin(math.sqrt(a)) + + +@lru_cache(maxsize=1) +def _load(): + """Load the merged Prithvi water mask (combined across NYC MGRS tiles) + as a GeoDataFrame in NYC state plane (EPSG:2263) for fast metric + distance queries.""" + import geopandas as gpd + # Prefer the Ida flood-event diff (real flood-attribution signal); + # fall back to clear-day permanent-water masks if the Ida file is absent. 
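+    # Note: the chosen GeoJSON doubles as its own metadata sidecar —
+    # the json.load below surfaces top-level keys (pre_scene_id,
+    # post_scene_id, scene_ids, ...) that gpd.read_file ignores;
+    # summary_for_point formats them into the PrithviSummary scene
+    # fields.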
+ candidates = [ + DATA_DIR / "prithvi_ida_2021.geojson", + DATA_DIR / "prithvi_flood_nyc.geojson", + ] + candidates += sorted(DATA_DIR.glob("prithvi_flood_*.geojson"), reverse=True) + path = next((p for p in candidates if p.exists()), None) + if path is None: + return None, None + with open(path) as f: + meta = json.load(f) + g = gpd.read_file(path) + if g.crs is None: + g.set_crs("EPSG:4326", inplace=True) + g = g.to_crs("EPSG:2263") + return g, meta + + +def warm() -> None: + _load() + + +def summary_for_point(lat: float, lon: float) -> PrithviSummary | None: + import geopandas as gpd + from shapely.geometry import Point + g, meta = _load() + if g is None: + return None + pt_wgs = gpd.GeoSeries([Point(lon, lat)], crs="EPSG:4326") + pt_2263 = pt_wgs.to_crs("EPSG:2263").iloc[0] + inside = bool(g.contains(pt_2263).any()) + + # nearest distance (feet -> metres) + distances_ft = g.geometry.distance(pt_2263) + nearest_ft = float(distances_ft.min()) if len(distances_ft) else None + nearest_m = round(nearest_ft / 3.281, 1) if nearest_ft is not None else None + + within_500m = int((distances_ft <= 500 * 3.281).sum()) + + # The Ida pre/post artifact carries pre_/post_ scene info; the clear-day + # artifact carries scene_ids[]. Format compactly for either case. + if "post_scene_id" in meta: + sid = f"pre {meta['pre_scene_id']} | post {meta['post_scene_id']}" + sdate = f"pre {meta['pre_scene_date']}, post {meta['post_scene_date']}" + else: + sid = meta.get("scene_id") or ", ".join(meta.get("scene_ids", []) or ["unknown"]) + sdate = meta.get("scene_date") or ", ".join(meta.get("scene_dates", []) or ["unknown"]) + + return PrithviSummary( + inside_water_polygon=inside, + nearest_distance_m=nearest_m, + n_polygons_within_500m=within_500m, + scene_id=sid, + scene_date=sdate, + ) diff --git a/app/flood_layers/sandy_inundation.py b/app/flood_layers/sandy_inundation.py new file mode 100644 index 0000000000000000000000000000000000000000..4a93dd150ca360dca36235182c0eacf14ded110c --- /dev/null +++ b/app/flood_layers/sandy_inundation.py @@ -0,0 +1,110 @@ +"""NYC Sandy Inundation Zone (empirical 2012 extent, NYC OD 5xsi-dfpx). + +Two query paths exist: + inside_raster(point) — fast path. Samples data/baked/sandy.tif. + ~1 ms; used by step_sandy in the FSM. + join(assets) — legacy GeoJSON sjoin path. Retained as a + fallback when the baked raster is absent (local dev) and + for coverage_for_polygon (neighborhood mode). 
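+
+A minimal point-check sketch (coordinates hypothetical; mirrors how
+step_sandy in app/fsm.py calls the fast path):
+
+    import geopandas as gpd
+    from shapely.geometry import Point
+
+    pt = (gpd.GeoDataFrame(geometry=[Point(-74.012, 40.678)],
+                           crs="EPSG:4326")
+          .to_crs("EPSG:2263").iloc[0].geometry)
+    hit = inside_raster(pt)  # ~1 ms against data/baked/sandy.tif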
+""" +from __future__ import annotations + +import logging +import threading +from functools import lru_cache + +import geopandas as gpd + +from app.spatial import DATA, NYC_CRS, load_layer + +DOC_ID = "sandy_inundation" +CITATION = "NYC Sandy Inundation Zone (NYC OpenData 5xsi-dfpx, empirical 2012 extent)" + +log = logging.getLogger(__name__) +BAKED = DATA / "baked" +_TLOCAL = threading.local() +_FALLBACK_WARNED = False + + +@lru_cache(maxsize=1) +def load() -> gpd.GeoDataFrame: + g = load_layer(DATA / "sandy_inundation.geojson") + return g[["geometry"]] + + +def join(assets: gpd.GeoDataFrame) -> gpd.pd.Series: + """Return a boolean Series indexed like assets: True if inside Sandy zone.""" + z = load() + # spatial join avoids fragile unary union over messy public polygons + hits = gpd.sjoin( + assets[["geometry"]].assign(_aid=range(len(assets))), + z[["geometry"]], + how="left", + predicate="intersects", + ) + flagged = hits.dropna(subset=["index_right"])["_aid"].unique() + s = assets.geometry.copy().astype(bool) + s[:] = False + s.iloc[list(flagged)] = True + return s.reset_index(drop=True) + + +def _raster_handle(): + """Per-thread rasterio handle. See dep_stormwater._raster_handles.""" + h = getattr(_TLOCAL, "handle", None) + if h is not None: + return h + p = BAKED / "sandy.tif" + if not p.exists(): + return None + import rasterio + h = rasterio.open(str(p)) + _TLOCAL.handle = h + return h + + +def inside_raster(pt_geom_2263) -> bool: + """Fast path. True if the shapely Point (in EPSG:2263) falls inside the + 2012 Sandy inundation extent. Falls back to the GeoJSON sjoin path if + data/baked/sandy.tif is missing.""" + global _FALLBACK_WARNED + h = _raster_handle() + if h is None: + if not _FALLBACK_WARNED: + log.warning( + "data/baked/sandy.tif not found — falling back to GeoJSON sjoin. " + "Run: uv run python scripts/bake_cornerstone_rasters.py" + ) + _FALLBACK_WARNED = True + a = gpd.GeoDataFrame(geometry=[pt_geom_2263], crs=NYC_CRS) + return bool(join(a).iloc[0]) + v = next(h.sample([(pt_geom_2263.x, pt_geom_2263.y)])) + return bool(int(v[0])) + + +def coverage_for_polygon(polygon, polygon_crs: str = "EPSG:4326") -> dict: + """Polygon-level summary: what fraction of the input polygon overlaps + the 2012 Sandy inundation extent? Used in neighborhood-mode queries. + + Returns: + { + 'overlap_area_m2': absolute overlap in m2, + 'polygon_area_m2': total polygon area in m2, + 'fraction': overlap / polygon_area, range [0, 1], + 'inside': True if any overlap exists, + } + """ + z = load().to_crs("EPSG:2263") # NY State Plane Long Island, units = ft + poly_gdf = gpd.GeoDataFrame(geometry=[polygon], crs=polygon_crs).to_crs("EPSG:2263") + poly_geom = poly_gdf.iloc[0].geometry + inter = z.intersection(poly_geom) + inter = inter[~inter.is_empty] + overlap_ft2 = float(inter.area.sum()) if len(inter) else 0.0 + poly_ft2 = float(poly_geom.area) + sqft_to_m2 = 0.092903 + return { + "overlap_area_m2": round(overlap_ft2 * sqft_to_m2, 1), + "polygon_area_m2": round(poly_ft2 * sqft_to_m2, 1), + "fraction": round(overlap_ft2 / poly_ft2, 4) if poly_ft2 else 0.0, + "inside": overlap_ft2 > 0, + } diff --git a/app/framing.py b/app/framing.py new file mode 100644 index 0000000000000000000000000000000000000000..a05668ecbe97a740a0f7cd0db943faaff8cfdf9d --- /dev/null +++ b/app/framing.py @@ -0,0 +1,249 @@ +"""Question-aware framing for the Capstone briefing opening. + +The four-section structure (Status / Empirical / Modeled / Policy) is +load-bearing for the Mellea grounding checks and stays unchanged. 
What +this module does is detect the *shape* of the user's question from the +raw query string + planner intent, then return a single-sentence +directive that conditions only the opening Status sentence. + +Eleven question types are recognised; they mirror the rubric in +`tests/integration/stakeholder_queries.py:FRAMING_RUBRICS`. Detection +is deterministic regex matching — no extra LLM call, no added latency. + +Usage: + + from app.framing import augment_system_prompt + system_prompt = augment_system_prompt( + EXTRA_SYSTEM_PROMPT, query=user_query, intent=plan.intent, + ) + +The returned prompt has the original text plus a trailing +`QUESTION-AWARE OPENING:` block. Granite 4.1 attends to this through +the system-prompt cache and applies it to the Status sentence. +""" +from __future__ import annotations + +import re +from typing import Final + +QUESTION_TYPES: Final[tuple[str, ...]] = ( + "habitability_decision", + "legal_disclosure", + "capital_planning", + "underwriting", + "journalism", + "development_siting", + "grant_evidence", + "retrospective", + "emergency_response", + "comparison", + "generic_exposure", +) + + +# ---- Per-type opening directives ------------------------------------------ +# +# Each directive is one sentence that supplements (does not replace) the +# Status section's existing instruction. Granite 4.1 has a strong prior +# toward "this address is exposed to ..." openings; the directive +# overrides that in a question-shaped way without disturbing the four +# grounding invariants. + +_DIRECTIVES: dict[str, str] = { + "habitability_decision": ( + "The Status sentence MUST start with a direct verdict word " + "(\"Yes\" if the documents show meaningful flood evidence, \"No\" " + "if they don't), then name the single strongest piece of " + "evidence with its [doc_id]. The user is deciding whether to " + "live here — answer the question, then cite." + ), + "legal_disclosure": ( + "The Status sentence MUST state whether the documents contain " + "facts a NY RPL §462(2) or §231-b disclosure would need to " + "record. Begin with \"Disclosure is warranted\" or \"Disclosure " + "is not triggered\" based on the evidence, then name the " + "specific fact with its [doc_id]. The user is a real-estate " + "professional checking the disclosure threshold." + ), + "capital_planning": ( + "The Status sentence MUST frame the place as a capital-planning " + "candidate: name the dominant exposure with its [doc_id] and " + "indicate whether the evidence supports prioritization " + "(\"merits prioritization\", \"ranks high for hardening\") or " + "not. The user allocates infrastructure investment." + ), + "underwriting": ( + "The Status sentence MUST emphasize that every figure in the " + "briefing is independently sourced — open with the dominant " + "exposure and the specific [doc_id], then add a half-clause " + "noting that the audit chain follows below. The user is an " + "underwriter who needs a defensible loss narrative." + ), + "journalism": ( + "The Status sentence MUST be reproducible reporting prose: " + "name the place, name the dominant exposure with [doc_id], " + "and avoid editorial verbs like \"shocking\" or \"alarming\". " + "The user is a data journalist who will cite this prose verbatim." + ), + "development_siting": ( + "The Status sentence MUST start with the count of active " + "construction filings cited from [dob_permits] (e.g. \"N " + "active construction filings sit inside ...\") and indicate " + "which flood layer they intersect. 
The user is a developer or " + "architect doing a pre-design siting check." + ), + "grant_evidence": ( + "The Status sentence MUST open with \"Vulnerability " + "assessment:\" and name the place + dominant exposure with " + "[doc_id]. Treat the briefing as the evidence section of a " + "HUD CDBG-DR or FEMA BRIC application — formal, third-person, " + "free of advocacy framing." + ), + "retrospective": ( + "Riprap currently runs on present-day data sources. The Status " + "sentence MUST acknowledge the question is retrospective and " + "state explicitly that the briefing reflects the CURRENT state " + "of these data sources, not a snapshot from the requested date. " + "Then proceed with the present-day exposure picture so the user " + "still gets the geography. Silence-over-confabulation: never " + "reconstruct historical conditions you can't verify." + ), + "emergency_response": ( + "The Status sentence MUST quantify what is at risk in the " + "next few hours, citing the live signal that triggered the " + "query and any active alerts with [doc_id]. The user needs an " + "operational picture, not a historical exposure summary." + ), + "comparison": ( + "The Status sentence MUST name BOTH places the user is " + "comparing and indicate which one shows greater exposure on " + "the strongest cited signal. If only one place's data is " + "available in the documents, say so explicitly. The user is " + "doing a head-to-head decision." + ), + "generic_exposure": "", # default — no override +} + + +# ---- Detector ------------------------------------------------------------- +# +# Patterns are ordered: the FIRST type whose pattern matches wins. Order +# matters — more specific question shapes (legal_disclosure, grant_evidence, +# emergency_response) come before more general ones (habitability_decision, +# capital_planning) so the obvious specialist tags don't get swallowed. + +_PATTERNS: list[tuple[str, list[re.Pattern]]] = [ + ("retrospective", [ + re.compile(r"\b(would have|would Riprap|on (the )?date of|as of (the )?(date|day)|" + r"day before|prior to|before (Hurricane|Ida|Sandy|the storm)|" + r"on (August|September|October|November|December|January|February|March|" + r"April|May|June|July) \d{1,2},? ?\d{4}|" + r"time.?machine|retrospective|court (exhibit|testimony))\b", re.I), + ]), + ("emergency_response", [ + re.compile(r"\b(just triggered|right now|next (few |six |\d+ )?hours?|" + r"in the next \d+|currently flooding|flood (warning|watch) is active|" + r"sensor [A-Z]{2}-?\d+|live (alert|trigger))\b", re.I), + ]), + ("legal_disclosure", [ + re.compile(r"\b(disclos(e|ure|ed)|RPL\s*§?\s*\d+|Property Condition Disclosure|" + r"§\s*462|§\s*231-?b|seller'?s? disclosure|landlord'?s? disclosure|" + r"required to disclose|need to disclose)\b", re.I), + ]), + ("grant_evidence", [ + re.compile(r"\b(vulnerability assessment|CDBG-?DR|HUD|BRIC|" + r"grant application|funding application|community resilience grant|" + r"FEMA application|disaster recovery (application|funding))\b", re.I), + ]), + ("development_siting", [ + re.compile(r"\b(what (are|is) (they|being) build(ing)?|new construction|" + r"under construction|active (construction|filing|project|permit)|" + r"projects? (in progress|underway|planned)|architects?|" + r"siting check|pre.?design|" + r"DOB filing|developer)\b", re.I), + ]), + ("comparison", [ + # `prioritize X over Y` can have many words between, hence the + # bounded non-greedy span — capped at 80 chars to avoid runaway. 
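+        # e.g. "Should the MTA prioritize Red Hook over Canarsie for
+        # hardening?" (place names illustrative) hits the second
+        # pattern below, and because comparison is ordered before
+        # capital_planning it wins even though "prioritize" would
+        # also satisfy the capital_planning regex.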
+ re.compile(r"\b(compare\b|comparison|\bvs\b|\bversus\b|" + r"head-?to-?head|\brank\s+the\s+top)\b", re.I), + re.compile(r"\bprioritize\b.{1,80}\bover\b", re.I | re.S), + re.compile(r"\bover\s+\w+(?:\s+\w+){0,3}\s+for\s+(hardening|investment)\b", re.I), + ]), + ("capital_planning", [ + re.compile(r"\b(prioritiz(e|ation)|capital plan(ning)?|harden(ing|s)?|" + r"infrastructure investment|where (should|to) (we |the )(invest|" + r"prioritize|harden)|MTA.+prioritize|DEP.+prioritize|" + r"protection envelope|outside (it|the protection)|" + r"resilien(ce|cy) project)\b", re.I), + ]), + ("habitability_decision", [ + re.compile(r"\b(should I worry|should I (be|consider)|is (it|this) safe|" + r"can I (rent|live|move|raise (my )?kids?)|considering (renting|leasing|moving)|" + r"(thinking about|planning to) (rent|lease|move|buy)|" + r"is (this|that|the landlord) true|landlord (says|claims|told)|" + r"no flood history|just got a lease|new lease|signing a lease|" + r"\bworry\b)", re.I), + ]), + ("underwriting", [ + re.compile(r"\b(underwrit(e|er|ing|able)|actuarial|loss history|" + r"insurabl[ey]|catastrophe (model|risk)|" + r"insurance (audit|memo|profile)|" + r"audit (chain|trail))\b", re.I), + ]), + ("journalism", [ + re.compile(r"\b(reporter|journalist|newsroom|story|coverage|" + r"published?|publish (this|the))", re.I), + ]), +] + + +def detect(query: str, intent: str | None = None) -> str: + """Classify the question shape from the raw query and planner intent. + + Returns one of `QUESTION_TYPES`. Falls back to `generic_exposure` + when no pattern matches — that's the existing behavior, preserved. + + `intent` is currently advisory only (the patterns don't read it), + but the parameter is part of the API so future refinements can + use it (e.g. an `intent=neighborhood` query without a verdict + keyword could default to `journalism` rather than `generic_exposure`). + """ + if not query: + return "generic_exposure" + q = query.strip() + for qt, patterns in _PATTERNS: + if any(p.search(q) for p in patterns): + return qt + # Heuristic fallback: bare neighborhood/borough names from a planner + # context default to journalism (most common stakeholder reading a + # neighborhood-only query is a reporter or planner). For + # single_address with no question keyword, fall back to generic. + if intent == "neighborhood" and len(q.split()) <= 3: + return "journalism" + return "generic_exposure" + + +def opening_instruction(question_type: str) -> str: + """Return the directive sentence(s) for a question type. + Returns empty string for `generic_exposure` (no override).""" + return _DIRECTIVES.get(question_type, "") + + +def augment_system_prompt(base: str, *, query: str, + intent: str | None = None) -> str: + """Wrap a base system prompt with a question-aware opening directive. + + No-op when the detector returns `generic_exposure` — the original + behavior is preserved. + """ + qt = detect(query, intent) + directive = opening_instruction(qt) + if not directive: + return base + return ( + f"{base}\n\n" + f"QUESTION-AWARE OPENING (this directive overrides ONLY the opening " + f"**Status.** sentence; the four-section structure and citation " + f"discipline above remain in force):\n{directive}" + ) diff --git a/app/fsm.py b/app/fsm.py new file mode 100644 index 0000000000000000000000000000000000000000..6b3f63336a871eb70479f07e0be24c98b62db56d --- /dev/null +++ b/app/fsm.py @@ -0,0 +1,1394 @@ +"""Riprap Burr FSM — linear specialist pipeline for one address. 
+ +Each action either produces a structured fact (which becomes a document +the reconciler can cite) or stays silent on failure. The reconciler +(Granite 4.1) only sees documents from specialists that actually +produced data — the silence-over-confabulation contract. +""" +from __future__ import annotations + +import logging +import threading as _threading +import time +from typing import Any + +import geopandas as gpd +from burr.core import ApplicationBuilder, State, action +from shapely.geometry import Point + +from app import emissions +from app.context import floodnet, microtopo, noaa_tides, npcc4_slr, nws_alerts, nws_obs, nyc311 +from app.energy import estimate as energy_estimate +from app.flood_layers import dep_stormwater, ida_hwm, prithvi_water, sandy_inundation +from app.geocode import geocode_one +from app.live import floodnet_forecast as fn_forecast +from app.live import ttm_forecast +from app.rag import retrieve as rag_retrieve +from app.reconcile import citations_from_docs, reconcile as run_reconcile +from app.registers import doe_schools as r_schools +from app.registers import doh_hospitals as r_hospitals +from app.registers import mta_entrances as r_mta +from app.registers import nycha as r_nycha + +log = logging.getLogger("riprap.fsm") + +# NYC five-borough bbox. Specialists whose data sources are NYC-only +# (Sandy 2012, NYC DEP Stormwater, FloodNet, NYC 311, NYC microtopo +# raster, NYC Hurricane Ida Prithvi polygons) skip with an explicit +# "out of NYC scope" reason when geocode lands outside this envelope. +# Live specialists (NWS / NOAA / TTM) and the NY-State Ida HWMs run +# unconditionally. +_NYC_S, _NYC_W, _NYC_N, _NYC_E = 40.49, -74.27, 40.92, -73.69 + + +def _in_nyc(lat, lon) -> bool: + if lat is None or lon is None: + return False + return _NYC_S <= lat <= _NYC_N and _NYC_W <= lon <= _NYC_E + +# Thread-local hook so the streaming endpoint can subscribe to per-token +# Granite output during reconcile, without threading a callback through +# every Burr action signature. +_FSM_LOCAL = _threading.local() + + +def set_token_callback(on_token): + """Install a per-thread on_token(delta) callable for the next reconcile. + Pass None to clear.""" + _FSM_LOCAL.on_token = on_token + + +def _current_token_callback(): + return getattr(_FSM_LOCAL, "on_token", None) + + +def set_mellea_attempt_callback(fn): + _FSM_LOCAL.on_mellea_attempt = fn + + +def _current_mellea_attempt_callback(): + return getattr(_FSM_LOCAL, "on_mellea_attempt", None) + + +def set_strict_mode(strict: bool): + """Per-thread flag — when True the linear FSM's reconcile step routes + through Mellea-validated rejection sampling instead of the standard + streaming reconciler. Disables token streaming for that step.""" + _FSM_LOCAL.strict = bool(strict) + + +def _current_strict_mode() -> bool: + return bool(getattr(_FSM_LOCAL, "strict", False)) + + +def set_planned_specialists(spec_names): + """Install a per-thread set of specialist names from the planner. + + Used by step_reconcile to trim doc messages: documents whose family + prefix doesn't match any planned specialist are dropped before the + Mellea call. 
Cuts ~30-50% of prompt tokens on local Ollama, where
+    the FSM otherwise hands the reconciler every specialist's output
+    even if the planner only asked for a subset."""
+    _FSM_LOCAL.planned_specialists = set(spec_names) if spec_names else None
+
+
+def _current_planned_specialists():
+    return getattr(_FSM_LOCAL, "planned_specialists", None)
+
+
+def set_user_query(query: str | None):
+    """Install the user's original natural-language query for question-aware
+    framing in step_reconcile. The FSM's state["query"] is the geocoder
+    input (often just the street address), which doesn't carry the
+    user's question shape — set this separately so Capstone can detect
+    'should I worry' / 'is disclosure required' / etc."""
+    _FSM_LOCAL.user_query = query
+
+
+def _current_user_query() -> str | None:
+    return getattr(_FSM_LOCAL, "user_query", None)
+
+
+def set_planner_intent(intent: str | None):
+    """Install the planner's classified intent so step_reconcile can pass
+    it to the framing detector as a tiebreaker on bare-place queries."""
+    _FSM_LOCAL.planner_intent = intent
+
+
+def _current_planner_intent() -> str | None:
+    return getattr(_FSM_LOCAL, "planner_intent", None)
+
+
+# Canonical Burr: one action per specialist, sequential transitions.
+# A previous version of this module wrapped 16 specialists in a single
+# fan-out action that ran them concurrently in a ThreadPoolExecutor;
+# that path was removed because it sometimes hung after the fan-out
+# completed (Burr-internal post-action cleanup with custom executors)
+# and made the trace UI's per-step timing harder to reason about.
+# Parallelism, when wanted, belongs at the inference layer
+# (vLLM / Ollama NUM_PARALLEL), not the FSM.
+
+def _step(state: State, name: str) -> tuple[dict[str, Any], list[dict[str, Any]]]:
+    """Append a step record to a copy of the trace; returns (rec, trace)
+    so the action can mutate timing/result fields on the record and
+    write the extended trace back to state."""
+    trace = list(state.get("trace", []))
+    rec = {"step": name, "started_at": time.time(), "ok": None}
+    trace.append(rec)
+    return rec, trace
+
+
+@action(reads=["query"], writes=["geocode", "lat", "lon", "trace"])
+def step_geocode(state: State) -> State:
+    rec, trace = _step(state, "geocode")
+    try:
+        hit = geocode_one(state["query"])
+        if hit is None:
+            rec["ok"] = False
+            rec["err"] = "no geocoder match"
+            # Burr requires every declared write to be populated. Emit
+            # explicit None rather than leaving keys absent.
+ return state.update(geocode=None, lat=None, lon=None, trace=trace) + rec["ok"] = True + rec["result"] = {"address": hit.address, "lat": hit.lat, "lon": hit.lon} + return state.update( + geocode={"address": hit.address, "borough": hit.borough, + "lat": hit.lat, "lon": hit.lon, + "bbl": hit.bbl, "bin": hit.bin}, + lat=hit.lat, lon=hit.lon, trace=trace, + ) + except Exception as e: + rec["ok"] = False + rec["err"] = str(e) + log.exception("geocode failed") + return state.update(geocode=None, lat=None, lon=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["sandy", "trace"]) +def step_sandy(state: State) -> State: + rec, trace = _step(state, "sandy_inundation") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(sandy=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(sandy=None, trace=trace) + pt_geom = (gpd.GeoDataFrame(geometry=[Point(state["lon"], state["lat"])], + crs="EPSG:4326") + .to_crs("EPSG:2263").iloc[0].geometry) + flag = sandy_inundation.inside_raster(pt_geom) + rec["ok"] = True; rec["result"] = {"inside": flag} + return state.update(sandy=flag, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("sandy failed") + return state.update(sandy=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["dep", "trace"]) +def step_dep(state: State) -> State: + rec, trace = _step(state, "dep_stormwater") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(dep=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(dep=None, trace=trace) + pt_geom = (gpd.GeoDataFrame(geometry=[Point(state["lon"], state["lat"])], + crs="EPSG:4326") + .to_crs("EPSG:2263").iloc[0].geometry) + out: dict[str, Any] = {} + for scen in ["dep_extreme_2080", "dep_moderate_2050", "dep_moderate_current"]: + cls = dep_stormwater.join_raster(pt_geom, scen) + out[scen] = { + "depth_class": cls, + "depth_label": dep_stormwater.DEPTH_CLASS.get(cls, "outside"), + "citation": f"NYC DEP Stormwater Flood Map — {dep_stormwater.label(scen)}", + } + rec["ok"] = True; rec["result"] = {k: v["depth_label"] for k, v in out.items()} + return state.update(dep=out, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("dep failed") + return state.update(dep=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["floodnet", "trace"]) +def step_floodnet(state: State) -> State: + rec, trace = _step(state, "floodnet") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(floodnet=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(floodnet=None, trace=trace) + s = floodnet.summary_for_point(state["lat"], state["lon"], radius_m=600) + s["radius_m"] = 600 + rec["ok"] = True + rec["result"] = {"n_sensors": s["n_sensors"], + "n_events_3y": s["n_flood_events_3y"]} + return state.update(floodnet=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("floodnet failed") 
+ return state.update(floodnet=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["nyc311", "trace"]) +def step_311(state: State) -> State: + rec, trace = _step(state, "nyc311") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(nyc311=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(nyc311=None, trace=trace) + s = nyc311.summary_for_point(state["lat"], state["lon"], radius_m=200, years=5) + rec["ok"] = True; rec["result"] = {"n": s["n"]} + return state.update(nyc311=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("311 failed") + return state.update(nyc311=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["ida_hwm", "trace"]) +def step_ida_hwm(state: State) -> State: + rec, trace = _step(state, "ida_hwm_2021") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(ida_hwm=None, trace=trace) + s = ida_hwm.summary_for_point(state["lat"], state["lon"], radius_m=800) + if s is None: + rec["ok"] = False; rec["err"] = "HWM data missing" + return state.update(ida_hwm=None, trace=trace) + rec["ok"] = True + rec["result"] = { + "n_within_800m": s.n_within_radius, + "max_height_above_gnd_ft": s.max_height_above_gnd_ft, + "nearest_m": s.nearest_dist_m, + } + return state.update(ida_hwm=vars(s), trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("ida_hwm failed") + return state.update(ida_hwm=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["prithvi_water", "trace"]) +def step_prithvi(state: State) -> State: + rec, trace = _step(state, "prithvi_eo_v2") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(prithvi_water=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(prithvi_water=None, trace=trace) + s = prithvi_water.summary_for_point(state["lat"], state["lon"]) + if s is None: + rec["ok"] = False; rec["err"] = "Prithvi mask missing" + return state.update(prithvi_water=None, trace=trace) + rec["ok"] = True + rec["result"] = { + "inside_water_polygon": s.inside_water_polygon, + "nearest_distance_m": s.nearest_distance_m, + "n_polygons_within_500m": s.n_polygons_within_500m, + } + return state.update(prithvi_water=vars(s), trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("prithvi failed") + return state.update(prithvi_water=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["prithvi_live", "trace"]) +def step_prithvi_live(state: State) -> State: + """Live Sentinel-2 water segmentation via Prithvi-EO 2.0. + + Network + 300M-param forward pass per query, so it's the slowest + specialist by far. Gracefully no-ops via the underlying module if + `RIPRAP_PRITHVI_LIVE_ENABLE=0` or if STAC / model load fails. 
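+
+    Illustrative success payload written to state["prithvi_live"]
+    (keys per prithvi_live.fetch; values hypothetical):
+
+        {"ok": True, "item_id": "S2B_MSIL2A_...", "cloud_cover": 4.6,
+         "pct_water_within_500m": 1.8, "compute": "remote · gpu",
+         "elapsed_s": 12.3}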
+ """ + rec, trace = _step(state, "prithvi_eo_live") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(prithvi_live=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(prithvi_live=None, trace=trace) + from app.flood_layers import prithvi_live + s = prithvi_live.fetch(state["lat"], state["lon"]) + rec["ok"] = bool(s.get("ok")) + if not s.get("ok"): + rec["err"] = s.get("err") or s.get("skipped") or "no observation" + else: + rec["result"] = { + "scene_date": (s.get("item_datetime") or "")[:10], + "cloud_cover": s.get("cloud_cover"), + "pct_water_500m": s.get("pct_water_within_500m"), + "pct_water_5km": s.get("pct_water_full"), + } + return state.update(prithvi_live=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("prithvi_live failed") + return state.update(prithvi_live=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["ttm_311_forecast", "trace"]) +def step_ttm_311_forecast(state: State) -> State: + """TTM r2 zero-shot forecast on weekly 311 flood-complaint counts + at this specific address (200 m radius). 52 weeks of context → + 4 weeks of forecast. Per-query, per-address, citable.""" + rec, trace = _step(state, "ttm_311_forecast") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(ttm_311_forecast=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(ttm_311_forecast=None, trace=trace) + s = ttm_forecast.weekly_311_forecast_for_point(state["lat"], state["lon"]) + rec["ok"] = bool(s.get("available")) + if not rec["ok"]: + rec["err"] = s.get("reason", "unavailable") + else: + rec["result"] = { + "history_total": s.get("history_total_complaints"), + "history_recent_mean": s.get("history_recent_3mo_mean"), + "forecast_mean": s.get("forecast_mean_per_week"), + "forecast_peak": s.get("forecast_peak_per_week"), + "accelerating": s.get("accelerating"), + } + return state.update(ttm_311_forecast=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("ttm_311_forecast failed") + return state.update(ttm_311_forecast=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["terramind", "trace"]) +def step_terramind(state: State) -> State: + """TerraMind v1 base — DEM → S2L2A synthesis as a per-query + cognitive-engine node. ~3-7s on M3 CPU. Output is a + *synthetic-prior* — explicitly fourth epistemic class alongside + empirical / modeled / proxy. 
Frame the doc body and reconciler + narration as 'plausible synthesis from terrain context', never + 'imaged' or 'reconstructed'.""" + rec, trace = _step(state, "terramind_synthesis") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(terramind=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(terramind=None, trace=trace) + from app.context import terramind_synthesis + s = terramind_synthesis.fetch(state["lat"], state["lon"]) + rec["ok"] = bool(s.get("ok")) + if not s.get("ok"): + rec["err"] = s.get("err") or s.get("skipped") or "terramind unavailable" + else: + rec["result"] = { + "tim_chain": s.get("tim_chain"), + "diffusion_steps": s.get("diffusion_steps"), + "dem_mean_m": s.get("dem_mean_m"), + "synth_chip_shape": s.get("synth_chip_shape"), + "elapsed_s": s.get("elapsed_s"), + } + return state.update(terramind=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("terramind failed") + return state.update(terramind=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["noaa_tides", "trace"]) +def step_noaa_tides(state: State) -> State: + rec, trace = _step(state, "noaa_tides") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(noaa_tides=None, trace=trace) + s = noaa_tides.summary_for_point(state["lat"], state["lon"]) + rec["ok"] = s.get("error") is None + rec["result"] = { + "station": s["station_id"], + "observed_ft_mllw": s["observed_ft_mllw"], + "residual_ft": s["residual_ft"], + } + if s.get("error"): rec["err"] = s["error"] + return state.update(noaa_tides=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("noaa_tides failed") + return state.update(noaa_tides=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["nws_alerts", "trace"]) +def step_nws_alerts(state: State) -> State: + rec, trace = _step(state, "nws_alerts") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(nws_alerts=None, trace=trace) + s = nws_alerts.summary_for_point(state["lat"], state["lon"]) + rec["ok"] = s.get("error") is None + rec["result"] = {"n_active": s["n_active"]} + if s.get("error"): rec["err"] = s["error"] + return state.update(nws_alerts=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("nws_alerts failed") + return state.update(nws_alerts=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["nws_obs", "trace"]) +def step_nws_obs(state: State) -> State: + rec, trace = _step(state, "nws_obs") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(nws_obs=None, trace=trace) + s = nws_obs.summary_for_point(state["lat"], state["lon"]) + rec["ok"] = s.get("error") is None + rec["result"] = { + "station": s["station_id"], + "p1h_mm": s["precip_last_hour_mm"], + "p6h_mm": s["precip_last_6h_mm"], + } + if s.get("error"): rec["err"] = s["error"] + return state.update(nws_obs=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("nws_obs failed") + return state.update(nws_obs=None, 
trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["ttm_forecast", "trace"]) +def step_ttm_forecast(state: State) -> State: + """Granite TTM r2 zero-shot forecast of the Battery surge residual.""" + rec, trace = _step(state, "ttm_forecast") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(ttm_forecast=None, trace=trace) + s = ttm_forecast.summary_for_point(state["lat"], state["lon"]) + if not s.get("available"): + rec["ok"] = False + rec["err"] = s.get("reason", "TTM unavailable") + return state.update(ttm_forecast=None, trace=trace) + rec["ok"] = True + rec["result"] = { + "context": s["context_length"], + "horizon": s["horizon_steps"], + "forecast_peak_ft": s["forecast_peak_ft"], + "forecast_peak_min_ahead": s["forecast_peak_minutes_ahead"], + "interesting": s["interesting"], + } + return state.update(ttm_forecast=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("ttm_forecast failed") + return state.update(ttm_forecast=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["ttm_battery_surge", "trace"]) +def step_ttm_battery_surge(state: State) -> State: + """Granite TTM r2 fine-tune — 96 h hourly Battery surge nowcast. + + Same TTM r2 backbone family as step_ttm_forecast but a different + artefact: msradam/Granite-TTM-r2-Battery-Surge, trained on AMD + MI300X. Hourly cadence vs the zero-shot's 6-min, 4-day vs 9.6 h + horizon. Both can fire on the same query — the reconciler frames + each as a distinct forecast in the briefing.""" + rec, trace = _step(state, "ttm_battery_surge") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(ttm_battery_surge=None, trace=trace) + # Battery gauge is a single point; the forecast applies citywide + # to NYC harbor entrance, so we don't gate by NYC bbox. + from app.live import ttm_battery_surge + s = ttm_battery_surge.fetch() + rec["ok"] = bool(s.get("available")) + if not rec["ok"]: + rec["err"] = s.get("reason", "unavailable") + return state.update(ttm_battery_surge=None, trace=trace) + rec["result"] = { + "context_h": s.get("context_hours"), + "horizon_h": s.get("horizon_hours"), + "forecast_peak_m": s.get("forecast_peak_m"), + "forecast_peak_hours_ahead": s.get("forecast_peak_hours_ahead"), + "interesting": s.get("interesting"), + } + return state.update(ttm_battery_surge=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("ttm_battery_surge failed") + return state.update(ttm_battery_surge=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["floodnet_forecast", "trace"]) +def step_floodnet_forecast(state: State) -> State: + """TTM r2 forecast of flood-event recurrence at the nearest FloodNet + sensor. Reuses the same (512, 96) singleton as ttm_311_forecast — no + additional model loaded into memory. 
Silent when the sensor has too + few historical events for a defensible forecast.""" + rec, trace = _step(state, "floodnet_forecast") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(floodnet_forecast=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(floodnet_forecast=None, trace=trace) + s = fn_forecast.summary_for_point(state["lat"], state["lon"]) + rec["ok"] = bool(s.get("available")) + if not rec["ok"]: + rec["err"] = s.get("reason", "unavailable") + else: + rec["result"] = { + "sensor_id": s.get("sensor_id"), + "distance_m": s.get("distance_from_query_m"), + "history_28d": s.get("history_recent_28d_events"), + "forecast_28d": s.get("forecast_28d_expected_events"), + "accelerating": s.get("accelerating"), + } + return state.update(floodnet_forecast=s if rec["ok"] else None, + trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("floodnet_forecast failed") + return state.update(floodnet_forecast=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["npcc4_slr", "trace"]) +def step_npcc4_projection(state: State) -> State: + """NPCC4 (2024) sea-level rise table — static lookup, always available.""" + rec, trace = _step(state, "npcc4_projection") + try: + s = npcc4_slr.get_projections() + rec["ok"] = True + rec["result"] = { + "2050_10th_in": s["2050"]["10"]["in"], + "2050_50th_in": s["2050"]["50"]["in"], + "2050_90th_in": s["2050"]["90"]["in"], + "2100_90th_in": s["2100"]["90"]["in"], + } + return state.update(npcc4_slr=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("npcc4_projection failed") + return state.update(npcc4_slr=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["mta_entrances", "trace"]) +def step_mta_entrances(state: State) -> State: + rec, trace = _step(state, "mta_entrance_exposure") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(mta_entrances=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(mta_entrances=None, trace=trace) + s = r_mta.summary_for_point(state["lat"], state["lon"]) + if not s.get("available"): + rec["ok"] = False; rec["err"] = "no entrances within radius" + return state.update(mta_entrances=None, trace=trace) + rec["ok"] = True + rec["result"] = { + "n_entrances": s["n_entrances"], + "n_inside_sandy_2012": s["n_inside_sandy_2012"], + "n_in_dep_extreme_2080": s["n_in_dep_extreme_2080"], + } + return state.update(mta_entrances=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("mta_entrances failed") + return state.update(mta_entrances=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["nycha_developments", "trace"]) +def step_nycha(state: State) -> State: + rec, trace = _step(state, "nycha_development_exposure") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(nycha_developments=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return 
state.update(nycha_developments=None, trace=trace) + s = r_nycha.summary_for_point(state["lat"], state["lon"]) + if not s.get("available"): + rec["ok"] = False; rec["err"] = "no NYCHA developments within radius" + return state.update(nycha_developments=None, trace=trace) + rec["ok"] = True + rec["result"] = { + "n_developments": s["n_developments"], + "n_inside_sandy_2012": s["n_inside_sandy_2012"], + "n_in_dep_extreme_2080": s["n_in_dep_extreme_2080"], + } + return state.update(nycha_developments=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("nycha failed") + return state.update(nycha_developments=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["doe_schools", "trace"]) +def step_doe_schools(state: State) -> State: + rec, trace = _step(state, "doe_school_exposure") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(doe_schools=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(doe_schools=None, trace=trace) + s = r_schools.summary_for_point(state["lat"], state["lon"]) + if not s.get("available"): + rec["ok"] = False; rec["err"] = "no schools within radius" + return state.update(doe_schools=None, trace=trace) + rec["ok"] = True + rec["result"] = { + "n_schools": s["n_schools"], + "n_inside_sandy_2012": s["n_inside_sandy_2012"], + "n_in_dep_extreme_2080": s["n_in_dep_extreme_2080"], + } + return state.update(doe_schools=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("doe_schools failed") + return state.update(doe_schools=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["doh_hospitals", "trace"]) +def step_doh_hospitals(state: State) -> State: + rec, trace = _step(state, "doh_hospital_exposure") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(doh_hospitals=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(doh_hospitals=None, trace=trace) + s = r_hospitals.summary_for_point(state["lat"], state["lon"]) + if not s.get("available"): + rec["ok"] = False; rec["err"] = "no hospitals within radius" + return state.update(doh_hospitals=None, trace=trace) + rec["ok"] = True + rec["result"] = { + "n_hospitals": s["n_hospitals"], + "n_inside_sandy_2012": s["n_inside_sandy_2012"], + "n_in_dep_extreme_2080": s["n_in_dep_extreme_2080"], + } + return state.update(doh_hospitals=s, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("doh_hospitals failed") + return state.update(doh_hospitals=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon"], writes=["microtopo", "trace"]) +def step_microtopo(state: State) -> State: + rec, trace = _step(state, "microtopo_lidar") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(microtopo=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(microtopo=None, trace=trace) + m = microtopo.microtopo_at(state["lat"], state["lon"]) + if m is None: + rec["ok"] = False; 
rec["err"] = "DEM fetch failed" + return state.update(microtopo=None, trace=trace) + rec["ok"] = True + rec["result"] = { + "elev_m": m.point_elev_m, + "pct_200m": m.rel_elev_pct_200m, + "relief_m": m.basin_relief_m, + } + return state.update(microtopo=vars(m), trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("microtopo failed") + return state.update(microtopo=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + + + +@action(reads=["lat", "lon"], writes=["eo_chip", "trace"]) +def step_eo_chip(state: State) -> State: + """Fetch one S2L2A + S1RTC + DEM chip per query and stash it in + state for the TerraMind-NYC specialists. + + Centralised so step_terramind_lulc and step_terramind_buildings + don't each re-fetch ~150 MB of imagery. Best-effort by design — + a deps-missing or no-scene outcome writes `{ok: False, skipped: ...}` + and the downstream TerraMind specialists silently no-op.""" + rec, trace = _step(state, "eo_chip_fetch") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(eo_chip=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(eo_chip=None, trace=trace) + from app.context import eo_chip_cache + chip = eo_chip_cache.fetch(state["lat"], state["lon"]) + rec["ok"] = bool(chip.get("ok")) + if not rec["ok"]: + rec["err"] = chip.get("skipped") or chip.get("err") or "unavailable" + else: + rec["result"] = { + "scene_id": (chip.get("s2_meta") or {}).get("scene_id"), + "scene_date": ((chip.get("s2_meta") or {}).get("datetime") or "")[:10], + "cloud_cover": (chip.get("s2_meta") or {}).get("cloud_cover"), + "has_s1": chip.get("s1") is not None, + "has_dem": chip.get("dem") is not None, + } + return state.update(eo_chip=chip, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("eo_chip failed") + return state.update(eo_chip=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon", "eo_chip"], writes=["terramind_lulc", "trace"]) +def step_terramind_lulc(state: State) -> State: + """5-class macro NYC LULC via msradam/TerraMind-NYC-Adapters. + + Consumes the shared chip from step_eo_chip; if that didn't fire + cleanly this no-ops. 
Adapter loading (~1.6 GB base + ~325 MB LoRA) + is lazy on first call and cached across queries.""" + rec, trace = _step(state, "terramind_lulc") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(terramind_lulc=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(terramind_lulc=None, trace=trace) + chip = state.get("eo_chip") or {} + if not chip.get("ok"): + rec["ok"] = False + rec["err"] = chip.get("skipped") or chip.get("err") or "no chip" + return state.update(terramind_lulc=None, trace=trace) + from app.context import terramind_nyc + tensors = chip.get("tensors") or {} + out = terramind_nyc.lulc( + tensors.get("S2L2A"), + s1rtc=tensors.get("S1RTC"), + dem=tensors.get("DEM"), + bounds_4326=chip.get("bounds_4326"), + ) + rec["ok"] = bool(out.get("ok")) + if not rec["ok"]: + rec["err"] = out.get("skipped") or out.get("err") or "unavailable" + else: + rec["result"] = { + "dominant_class": out.get("dominant_class"), + "dominant_pct": out.get("dominant_pct"), + "n_classes_observed": len(out.get("class_fractions") or {}), + } + return state.update(terramind_lulc=out, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("terramind_lulc failed") + return state.update(terramind_lulc=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["lat", "lon", "eo_chip"], + writes=["terramind_buildings", "trace"]) +def step_terramind_buildings(state: State) -> State: + """Binary NYC building-footprint mask via msradam/TerraMind-NYC-Adapters.""" + rec, trace = _step(state, "terramind_buildings") + try: + if state.get("lat") is None: + rec["ok"] = False; rec["err"] = "no coords" + return state.update(terramind_buildings=None, trace=trace) + if not _in_nyc(state["lat"], state["lon"]): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(terramind_buildings=None, trace=trace) + chip = state.get("eo_chip") or {} + if not chip.get("ok"): + rec["ok"] = False + rec["err"] = chip.get("skipped") or chip.get("err") or "no chip" + return state.update(terramind_buildings=None, trace=trace) + from app.context import terramind_nyc + tensors = chip.get("tensors") or {} + out = terramind_nyc.buildings( + tensors.get("S2L2A"), + s1rtc=tensors.get("S1RTC"), + dem=tensors.get("DEM"), + bounds_4326=chip.get("bounds_4326"), + ) + rec["ok"] = bool(out.get("ok")) + if not rec["ok"]: + rec["err"] = out.get("skipped") or out.get("err") or "unavailable" + else: + rec["result"] = { + "pct_buildings": out.get("pct_buildings"), + "n_building_components": out.get("n_building_components"), + } + return state.update(terramind_buildings=out, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("terramind_buildings failed") + return state.update(terramind_buildings=None, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["geocode", "sandy", "dep", "floodnet", "nyc311", "microtopo", + "ida_hwm", "prithvi_water", "noaa_tides", "nws_alerts", "nws_obs", + "ttm_forecast"], + writes=["rag", "trace"]) +def step_rag(state: State) -> State: + rec, trace = _step(state, "rag_granite_embedding") + try: + geo = state.get("geocode") or {} + if not _in_nyc(geo.get("lat"), geo.get("lon")): + rec["ok"] = False; rec["err"] = "out of NYC scope" + return state.update(rag=[], trace=trace) + sandy = 
state.get("sandy") + dep = state.get("dep") or {} + # Build a context-rich query so retrieval pulls policy paragraphs + # relevant to *this* address, not generic flood text. + bits = [] + if geo.get("address"): + bits.append(f"address {geo['address']}") + if geo.get("borough"): + bits.append(f"in {geo['borough']}") + if sandy: + bits.append("inside Hurricane Sandy 2012 inundation zone") + for v in dep.values(): + if v.get("depth_class", 0) > 0: + bits.append(f"in {v['depth_label']} pluvial scenario") + bits.append("flood resilience plan, vulnerability, hardening, mitigation") + q = "; ".join(bits) + hits = rag_retrieve(q, k=3, min_score=0.45) + rec["ok"] = True + rec["result"] = {"hits": len(hits), + "top": [(h["doc_id"], round(h["score"], 2)) for h in hits]} + return state.update(rag=hits, trace=trace) + except Exception as e: + rec["ok"] = False; rec["err"] = str(e) + log.exception("rag failed") + return state.update(rag=[], trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +@action(reads=["rag"], writes=["gliner", "trace"]) +def step_gliner(state: State) -> State: + """GLiNER typed-entity extraction over the top RAG paragraphs. + + Adds structured fields (`agency`, `dollar_amount`, + `infrastructure_project`, `nyc_location`, `date_range`) the + reconciler can cite with `[gliner_]`. Silent no-op when + disabled via RIPRAP_GLINER_ENABLE=0 or when the model failed to + load — preserves the existing FSM contract. + """ + rec, trace = _step(state, "gliner_extract") + try: + from app.context.gliner_extract import extract_for_rag_hits + hits = state.get("rag") or [] + if not hits: + rec["ok"] = True + rec["result"] = {"sources": 0, "skipped": "no rag hits"} + return state.update(gliner={}, trace=trace) + out = extract_for_rag_hits(hits) + rec["ok"] = True + rec["result"] = { + "sources": len(out), + "totals_by_label": _label_counts(out), + } + return state.update(gliner=out, trace=trace) + except Exception as e: + rec["ok"] = False + rec["err"] = str(e) + log.exception("gliner failed") + return state.update(gliner={}, trace=trace) + finally: + rec["elapsed_s"] = round(time.time() - rec["started_at"], 2) + + +def _label_counts(gliner_out: dict[str, dict]) -> dict[str, int]: + counts: dict[str, int] = {} + for src in gliner_out.values(): + for e in src.get("entities", []): + counts[e["label"]] = counts.get(e["label"], 0) + 1 + return counts + + +@action(reads=["geocode", "sandy", "dep", "floodnet", "nyc311", "microtopo", + "ida_hwm", "prithvi_water", "prithvi_live", "terramind", + "terramind_lulc", "terramind_buildings", + "noaa_tides", "nws_alerts", "nws_obs", "ttm_forecast", + "ttm_311_forecast", "floodnet_forecast", "npcc4_slr", + "ttm_battery_surge", + "mta_entrances", + "nycha_developments", "doe_schools", "doh_hospitals", + "rag", "gliner"], + writes=["paragraph", "audit", "mellea", "citations", "trace"]) +def step_reconcile(state: State) -> State: + is_strict = _current_strict_mode() + rec, trace = _step(state, "mellea_reconcile_address" if is_strict else "reconcile_granite41") + mellea_meta = None + try: + snap = { + "geocode": state.get("geocode"), + "sandy": state.get("sandy"), + "dep": state.get("dep"), + "floodnet": state.get("floodnet"), + "nyc311": state.get("nyc311"), + "microtopo": state.get("microtopo"), + "ida_hwm": state.get("ida_hwm"), + "prithvi_water": state.get("prithvi_water"), + "noaa_tides": state.get("noaa_tides"), + "nws_alerts": state.get("nws_alerts"), + "nws_obs": state.get("nws_obs"), + "ttm_forecast": 
state.get("ttm_forecast"), + "ttm_311_forecast": state.get("ttm_311_forecast"), + "floodnet_forecast": state.get("floodnet_forecast"), + "npcc4_slr": state.get("npcc4_slr"), + "ttm_battery_surge": state.get("ttm_battery_surge"), + "rag": state.get("rag"), + "gliner": state.get("gliner"), + "prithvi_live": state.get("prithvi_live"), + "terramind": state.get("terramind"), + "terramind_lulc": state.get("terramind_lulc"), + "terramind_buildings": state.get("terramind_buildings"), + "mta_entrances": state.get("mta_entrances"), + "nycha_developments": state.get("nycha_developments"), + "doe_schools": state.get("doe_schools"), + "doh_hospitals": state.get("doh_hospitals"), + } + if is_strict: + from app.framing import augment_system_prompt + from app.mellea_validator import DEFAULT_LOOP_BUDGET, reconcile_strict_streaming + from app.reconcile import EXTRA_SYSTEM_PROMPT, build_documents, trim_docs_to_plan + doc_msgs = build_documents(snap) + doc_msgs = trim_docs_to_plan(doc_msgs, _current_planned_specialists()) + if not doc_msgs: + para = "No grounded data available for this address." + audit = {"raw": para, "dropped": []} + else: + token_cb = _current_token_callback() + attempt_cb = _current_mellea_attempt_callback() + framed_prompt = augment_system_prompt( + EXTRA_SYSTEM_PROMPT, + query=_current_user_query() or state.get("query") or "", + intent=_current_planner_intent() or "single_address", + ) + # Forward the (delta, attempt_idx) pair through. Older + # token_cb signatures were single-arg; we detect by + # introspecting the callable's expected positional count + # so single_address.py's old shape still works while new + # callbacks see the attempt index they need to clear the + # frontend buffer on a Mellea reroll. + def _fwd_token(delta: str, attempt_idx: int) -> None: + if token_cb is None: + return + try: + token_cb(delta, attempt_idx) + except TypeError: + token_cb(delta) + mres = reconcile_strict_streaming( + doc_msgs, framed_prompt, + user_prompt="Write the cited paragraph now.", + loop_budget=DEFAULT_LOOP_BUDGET, + on_token=_fwd_token if token_cb else None, + on_attempt_end=attempt_cb, + ) + para = mres["paragraph"] + audit = {"raw": para, "dropped": []} + mellea_meta = { + "rerolls": mres["rerolls"], + "n_attempts": mres["n_attempts"], + "requirements_passed": mres["requirements_passed"], + "requirements_failed": mres["requirements_failed"], + "requirements_total": mres["requirements_total"], + "model": mres["model"], + "loop_budget": mres["loop_budget"], + } + rec["result"] = { + "rerolls": (mellea_meta or {}).get("rerolls"), + "passed": (f"{len((mellea_meta or {}).get('requirements_passed') or [])}/" + f"{(mellea_meta or {}).get('requirements_total') or 0}"), + "paragraph_chars": len(para), + } + else: + para, audit = run_reconcile(snap, return_audit=True, + on_token=_current_token_callback()) + rec["result"] = { + "paragraph_chars": len(para), + "dropped_sentences": len(audit["dropped"]), + } + # Build citation metadata list from whichever doc_msgs were used. 
+        from app.reconcile import build_documents, trim_docs_to_plan
+        _cite_msgs = build_documents(snap)
+        _cite_msgs = trim_docs_to_plan(_cite_msgs, _current_planned_specialists())
+        cite_list = citations_from_docs(_cite_msgs)
+        rec["ok"] = True
+        return state.update(paragraph=para, audit=audit,
+                            mellea=mellea_meta, citations=cite_list, trace=trace)
+    except Exception as e:
+        rec["ok"] = False; rec["err"] = str(e)
+        log.exception("reconcile failed")
+        return state.update(paragraph="", audit={"raw": "", "dropped": []},
+                            mellea=None, citations=[], trace=trace)
+    finally:
+        rec["elapsed_s"] = round(time.time() - rec["started_at"], 2)
+
+
+import os as _os  # noqa: E402
+
+
+# Specialists that involve large spatial joins (every NYCHA development
+# overlapped against multiple flood layers, every DOE school footprint
+# joined to DEM/HAND, etc.) or per-query model inference (Prithvi-EO live
+# STAC + ViT, TerraMind diffusion). They're ~1-3 minutes apiece on a
+# laptop on the FIRST call (the lru_caches inside the registers warm up
+# afterwards). The previous parallel-fan-out FSM hid that cost behind
+# the longest single specialist; the linear FSM exposes it.
+#
+# Default OFF on local-Ollama so the demo briefing returns in well under
+# 90 s. Enable explicitly with RIPRAP_HEAVY_SPECIALISTS=1 (e.g. on the
+# AMD-vLLM path, where the reconciler's ~5 s leaves room for the joins).
+#
+# Remote ML lift: when RIPRAP_ML_BACKEND=remote (or auto with a base URL
+# set) the heavy specialists' GPU work runs on the droplet, so the local
+# wall-clock cost drops from ~60 s to ~5 s. Default ON in that case so
+# the public demo never silently disables them.
+def _remote_ml_configured() -> bool:
+    backend = _os.environ.get("RIPRAP_ML_BACKEND", "auto").lower()
+    if backend == "local":
+        return False
+    return bool(_os.environ.get("RIPRAP_ML_BASE_URL", "").strip())
+
+
+_HEAVY_DEFAULT = (
+    "1" if (
+        _os.environ.get("RIPRAP_LLM_PRIMARY", "ollama").lower() != "ollama"
+        or _remote_ml_configured()
+    ) else "0"
+)
+_HEAVY_SPECIALISTS_ENABLED = _os.environ.get(
+    "RIPRAP_HEAVY_SPECIALISTS", _HEAVY_DEFAULT,
+).lower() in ("1", "true", "yes")
+
+# NYCHA / DOE / DOH registers load a 91 MB sandy_inundation.geojson via
+# geopandas on first call. On machines with slow I/O or single-threaded
+# Python GIL contention (M3 local dev) this takes 3–5 min and makes the
+# first single_address query appear hung. Disable by default; enable on
+# the AMD droplet where the server pre-warms these at startup.
+_NYCHA_REGISTERS_ENABLED = _os.environ.get(
+    "RIPRAP_NYCHA_REGISTERS", "0",
+).lower() in ("1", "true", "yes")
+
+
+def build_app(query: str):
+    """Linear, single-action-per-step Burr application.
+
+    Order: cheap-first geo + flood layers, then live network signals,
+    then RAG → reconcile. Heavy specialists (NYCHA / DOE / DOH register
+    joins, Prithvi-EO live STAC, TerraMind diffusion) are gated behind
+    RIPRAP_HEAVY_SPECIALISTS — see the module-level note above.
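+
+    Dict insertion order is execution order: the transition list below is
+    just each adjacent pair of action names (`zip(keys, keys[1:])`), so
+    adding a specialist means inserting it at the right spot in `actions`.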
+ """ + builder = ( + ApplicationBuilder() + .with_state(query=query, trace=[]) + .with_entrypoint("geocode") + ) + + actions: dict[str, Any] = { + "geocode": step_geocode, + "sandy": step_sandy, + "dep": step_dep, + "floodnet": step_floodnet, + "nyc311": step_311, + "noaa_tides": step_noaa_tides, + "nws_alerts": step_nws_alerts, + "nws_obs": step_nws_obs, + "ttm_forecast": step_ttm_forecast, + "ttm_311_forecast": step_ttm_311_forecast, + "floodnet_forecast": step_floodnet_forecast, + "npcc4_projection": step_npcc4_projection, + "ttm_battery_surge": step_ttm_battery_surge, + "microtopo": step_microtopo, + "ida_hwm": step_ida_hwm, + "mta_entrances": step_mta_entrances, + "prithvi": step_prithvi, # baked GeoJSON polygons for Ida; cheap + } + if _HEAVY_SPECIALISTS_ENABLED and _NYCHA_REGISTERS_ENABLED: + actions["nycha"] = step_nycha + actions["doe_schools"] = step_doe_schools + actions["doh_hospitals"] = step_doh_hospitals + if _HEAVY_SPECIALISTS_ENABLED: + actions["prithvi_live"] = step_prithvi_live + actions["terramind"] = step_terramind + # New TerraMind-NYC LoRA family — one chip fetch feeds two + # specialists. Keep eo_chip directly before the two consumers + # so the chip stays warm in memory and isn't garbage-collected + # by anything in between. + actions["eo_chip"] = step_eo_chip + actions["terramind_lulc"] = step_terramind_lulc + actions["terramind_buildings"] = step_terramind_buildings + actions["rag"] = step_rag + actions["gliner"] = step_gliner + actions["reconcile"] = step_reconcile + + # Sequential transitions — pair every adjacent action in the dict order. + keys = list(actions.keys()) + transitions = list(zip(keys, keys[1:])) + + return ( + builder.with_actions(**actions).with_transitions(*transitions).build() + ) + + +def _summarize_energy(trace: list) -> dict | None: + rec_step = next((t for t in trace if t.get("step") == "reconcile_granite41" + and t.get("ok")), None) + if not rec_step: + return None + total_s = sum(t.get("elapsed_s", 0) or 0 for t in trace) + return energy_estimate(rec_step.get("elapsed_s", 0) or 0, total_s) + + +def _summarize_emissions() -> dict | None: + """Snapshot the active per-call emissions tracker, if installed. + + Returns None when no tracker is bound to this thread (e.g. 
unit + tests that call `fsm.run` directly without going through the + web/intent layer that installs one).""" + t = emissions.current() + return t.summarize() if t is not None else None + + +def run(query: str) -> dict[str, Any]: + app = build_app(query) + final_action, _, final_state = app.run(halt_after=["reconcile"]) + trace = final_state.get("trace", []) + return { + "query": query, + "geocode": final_state.get("geocode"), + "sandy": final_state.get("sandy"), + "dep": final_state.get("dep"), + "floodnet": final_state.get("floodnet"), + "nyc311": final_state.get("nyc311"), + "microtopo": final_state.get("microtopo"), + "ida_hwm": final_state.get("ida_hwm"), + "prithvi_water": final_state.get("prithvi_water"), + "terramind": final_state.get("terramind"), + "terramind_lulc": final_state.get("terramind_lulc"), + "terramind_buildings": final_state.get("terramind_buildings"), + "eo_chip": final_state.get("eo_chip"), + "noaa_tides": final_state.get("noaa_tides"), + "nws_alerts": final_state.get("nws_alerts"), + "nws_obs": final_state.get("nws_obs"), + "ttm_forecast": final_state.get("ttm_forecast"), + "ttm_311_forecast": final_state.get("ttm_311_forecast"), + "floodnet_forecast": final_state.get("floodnet_forecast"), + "ttm_battery_surge": final_state.get("ttm_battery_surge"), + "mta_entrances": final_state.get("mta_entrances"), + "nycha_developments": final_state.get("nycha_developments"), + "doe_schools": final_state.get("doe_schools"), + "doh_hospitals": final_state.get("doh_hospitals"), + "rag": final_state.get("rag"), + "paragraph": final_state.get("paragraph"), + "audit": final_state.get("audit"), + "mellea": final_state.get("mellea"), + "energy": _summarize_energy(trace), + "emissions": _summarize_emissions(), + "trace": trace, + } + + +def iter_steps(query: str): + """Yield SSE-friendly events as the FSM runs. + + Each Burr action emits exactly one trace record on completion; we + yield it as a `step` event the moment the iterate loop returns from + that action. Reconciler tokens stream through the threadlocal + `set_token_callback` (installed before this generator is iterated), + not through this queue. + + Burr's `app.iterate(halt_after=["reconcile"])` runs synchronously, + yielding `(action, result, state)` after every action. We drive it + in a background thread so the per-action SSE events reach the + client as soon as each action returns, while the reconciler's + token callback fires concurrently from the same thread. + """ + import queue + + q: queue.Queue[tuple[str, Any] | None] = queue.Queue() + seen_keys: set[tuple[str, float]] = set() + + def _push_step(rec: dict) -> None: + key = (rec.get("step", ""), rec.get("started_at", 0.0)) + if key in seen_keys: + return + seen_keys.add(key) + q.put(("step", rec)) + + app = build_app(query) + final_state_holder: dict[str, Any] = {} + + # Threadlocals are per-thread; the request thread (single_address.run + # / neighborhood.run) sets the strict-mode flag, planner specialist + # set, and token / Mellea-attempt callbacks, but Burr's app.iterate + # runs in this generator's thread. Snapshot the request-thread state + # and re-install on the iterate thread so step_reconcile sees them. 
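+    # (Without the re-install, the worker thread would see the defaults:
+    # strict mode off, no token callback. A strict request would then
+    # silently reconcile down the plain non-streaming path.)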
+ _captured_strict = _current_strict_mode() + _captured_planned = _current_planned_specialists() + _captured_token_cb = _current_token_callback() + _captured_mellea_cb = _current_mellea_attempt_callback() + _captured_tracker = emissions.current() + + def _run_iterate(): + set_strict_mode(_captured_strict) + set_planned_specialists(_captured_planned) + set_token_callback(_captured_token_cb) + set_mellea_attempt_callback(_captured_mellea_cb) + emissions.install(_captured_tracker) + try: + for _action_obj, _result, state in app.iterate(halt_after=["reconcile"]): + final_state_holder["state"] = state + # Each action appends one record to state.trace; emit the + # most recent so the SSE client gets the step event the + # moment Burr returns from that action. + trace = state.get("trace") or [] + if trace: + _push_step(trace[-1]) + except Exception as e: + log.exception("iterate raised") + q.put(("error", {"err": f"{type(e).__name__}: {e}"})) + finally: + set_strict_mode(False) + set_planned_specialists(None) + set_token_callback(None) + set_mellea_attempt_callback(None) + emissions.install(None) + q.put(None) # sentinel + + runner = _threading.Thread(target=_run_iterate, name="riprap-fsm", + daemon=True) + runner.start() + + while True: + item = q.get() + if item is None: + break + kind, payload = item + if kind == "step": + yield { + "kind": "step", + "step": payload.get("step"), + "ok": payload.get("ok"), + "elapsed_s": payload.get("elapsed_s"), + "result": payload.get("result"), + "err": payload.get("err"), + } + elif kind == "error": + yield {"kind": "error", **payload} + + runner.join(timeout=5) + state = final_state_holder.get("state") + if state is None: + yield {"kind": "final", "paragraph": "", "error": "FSM failed before any action completed"} + return + trace = state.get("trace", []) + yield { + "kind": "final", + "geocode": state.get("geocode"), + "sandy": state.get("sandy"), + "dep": state.get("dep"), + "floodnet": state.get("floodnet"), + "nyc311": state.get("nyc311"), + "microtopo": state.get("microtopo"), + "ida_hwm": state.get("ida_hwm"), + "prithvi_water": state.get("prithvi_water"), + "prithvi_live": state.get("prithvi_live"), + "terramind": state.get("terramind"), + "terramind_lulc": state.get("terramind_lulc"), + "terramind_buildings": state.get("terramind_buildings"), + "noaa_tides": state.get("noaa_tides"), + "nws_alerts": state.get("nws_alerts"), + "nws_obs": state.get("nws_obs"), + "ttm_forecast": state.get("ttm_forecast"), + "ttm_311_forecast": state.get("ttm_311_forecast"), + "floodnet_forecast": state.get("floodnet_forecast"), + "ttm_battery_surge": state.get("ttm_battery_surge"), + "mta_entrances": state.get("mta_entrances"), + "nycha_developments": state.get("nycha_developments"), + "doe_schools": state.get("doe_schools"), + "doh_hospitals": state.get("doh_hospitals"), + "rag": state.get("rag"), + "gliner": state.get("gliner"), + "paragraph": state.get("paragraph"), + "audit": state.get("audit"), + "mellea": state.get("mellea"), + "citations": state.get("citations"), + "energy": _summarize_energy(trace), + "emissions": _summarize_emissions(), + } diff --git a/app/geocode.py b/app/geocode.py new file mode 100644 index 0000000000000000000000000000000000000000..090b7d82f2e962a7cd23c78afa64de7aa56439cc --- /dev/null +++ b/app/geocode.py @@ -0,0 +1,138 @@ +"""Address geocoding — NYC primary + national fallback. + +NYC primary: NYC DCP Geosearch (geosearch.planninglabs.nyc), no auth, +NYC-only. It will fuzzy-match upstate addresses to NYC streets — e.g. 
+'257 Washington Ave, Albany NY' silently maps to Clinton Hill, Brooklyn. +We detect this via a non-NYC region or non-NYC ZIP and fall back to +OpenStreetMap Nominatim (no key, free, rate-limited per usage policy). + +Includes a borough-hint post-filter so Queens hyphenated-style addresses +(e.g. '153-09 90 Ave, Jamaica, Queens') preferentially resolve to the +borough the user named. +""" +from __future__ import annotations + +import logging +import re +from dataclasses import dataclass + +import httpx + +log = logging.getLogger("riprap.geocode") + +URL = "https://geosearch.planninglabs.nyc/v2/search" +NOMINATIM_URL = "https://nominatim.openstreetmap.org/search" +NOMINATIM_UA = "Riprap-NYC/0.5 (civic-flood-tool; +https://huggingface.co/spaces/msradam/riprap-nyc)" + +# NYC-bbox guard: lat 40.49–40.92, lon -74.27 to -73.69. +NYC_BBOX = (40.49, -74.27, 40.92, -73.69) + +_UPSTATE_ZIP_RE = re.compile(r"\b1[2-4]\d{3}\b") +_BOROUGHS = ("Manhattan", "Bronx", "Brooklyn", "Queens", "Staten Island") + +def _detect_borough(text: str) -> str | None: + t = text.lower() + for b in _BOROUGHS: + if b.lower() in t: + return b + # neighborhood -> borough hints + hints = { + "queens": "Queens", "jamaica": "Queens", "rockaway": "Queens", + "astoria": "Queens", "flushing": "Queens", + "manhattan": "Manhattan", "harlem": "Manhattan", "soho": "Manhattan", + "brooklyn": "Brooklyn", "bushwick": "Brooklyn", "red hook": "Brooklyn", + "bronx": "Bronx", "fordham": "Bronx", + "staten island": "Staten Island", + } + for needle, boro in hints.items(): + if needle in t: + return boro + return None + +@dataclass +class GeocodeHit: + address: str + borough: str | None + lat: float + lon: float + bbl: str | None + bin: str | None + raw: dict + +def geocode(text: str, limit: int = 5) -> list[GeocodeHit]: + """NYC Geosearch primary.""" + try: + r = httpx.get(URL, params={"text": text, "size": limit}, timeout=5) + r.raise_for_status() + feats = r.json().get("features", []) + out = [] + for f in feats: + p = f.get("properties", {}) + coords = (f.get("geometry") or {}).get("coordinates") or [None, None] + out.append(GeocodeHit( + address=p.get("label") or p.get("name") or text, + borough=p.get("borough"), + lat=coords[1], + lon=coords[0], + bbl=p.get("addendum", {}).get("pad", {}).get("bbl"), + bin=p.get("addendum", {}).get("pad", {}).get("bin"), + raw=p, + )) + return out + except Exception as e: + log.warning("Geosearch failed: %r", e) + return [] + +def geocode_nominatim(text: str) -> GeocodeHit | None: + """National OSM Nominatim fallback.""" + try: + r = httpx.get(NOMINATIM_URL, params={ + "q": text, "format": "jsonv2", "addressdetails": "1", + "limit": 1, "countrycodes": "us", + }, headers={"User-Agent": NOMINATIM_UA}, timeout=10) + r.raise_for_status() + rows = r.json() + except Exception as e: + log.warning("Nominatim fetch failed: %r", e) + return None + if not rows: + return None + row = rows[0] + addr = row.get("address") or {} + + # Try to map Nominatim borough/county back to NYC standard + boro = addr.get("suburb") or addr.get("city_district") or addr.get("county") + if boro and "Kings" in boro: boro = "Brooklyn" + if boro and "New York County" in boro: boro = "Manhattan" + if boro and "Queens" in boro: boro = "Queens" + if boro and "Bronx" in boro: boro = "Bronx" + if boro and "Richmond" in boro: boro = "Staten Island" + + return GeocodeHit( + address=row.get("display_name") or text, + borough=boro, + lat=float(row["lat"]), + lon=float(row["lon"]), + bbl=None, # Nominatim doesn't have BBLs + bin=None, + 
raw={"source": "nominatim", **row}, + ) + +def geocode_one(text: str) -> GeocodeHit | None: + """Dynamic geocoder with failover.""" + # 1. Try Geosearch + hits = geocode(text) + hint = _detect_borough(text) + + if hint: + in_boro = [h for h in hits if h.borough and h.borough.lower() == hint.lower()] + if in_boro: return in_boro[0] + + if hits: + top = hits[0] + if top.lat and 40.4 <= top.lat <= 41.0: # Broad NYC check + return top + + # 2. Fall back to Nominatim + log.info("Falling back to Nominatim for %r", text) + return geocode_nominatim(text) diff --git a/app/inference.py b/app/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..9854af958cabb359a8d63cfad4955fbcc07d583c --- /dev/null +++ b/app/inference.py @@ -0,0 +1,268 @@ +"""Remote-vs-local ML inference router. + +Mirrors the call-surface shape of `app/llm.py` but for the non-LLM +heavy models (Prithvi, TerraMind, TTM, Granite Embedding, GLiNER). + +The droplet runs a `riprap-models` FastAPI service alongside vLLM that +exposes an OpenAI-style endpoint per model class. When configured the +router POSTs the relevant payload there and returns the parsed response; +on connection error / 5xx / timeout it surfaces a typed exception that +caller modules catch and fall back to a local in-process model load. + +Backend selection (env): + + RIPRAP_ML_BACKEND = "remote" | "local" | "auto" (default: auto) + - remote: use only the droplet, raise if it errors + - local : never call the droplet, always use the + in-process model + - auto : try remote first, fall back to local if + remote is unreachable / errors out; + same semantics as app/llm.py + RIPRAP_ML_BASE_URL = http://129.212.181.238:8002 (no trailing slash) + RIPRAP_ML_API_KEY = + +The router is *transport*-only — it does not own model bytes, weights, +or framework imports. Each specialist that wants remote inference calls +into the helpers below and provides its own local fallback. That keeps +the dependency graph clean: the local code path keeps working when the +RIPRAP_ML_* env is unset (e.g. on first-light dev or in unit tests). +""" +from __future__ import annotations + +import base64 +import logging +import os +import time +from collections.abc import Iterable +from typing import Any + +import httpx + +from app import emissions + +log = logging.getLogger("riprap.inference") + +_BACKEND = os.environ.get("RIPRAP_ML_BACKEND", "auto").lower() +_BASE_URL = os.environ.get("RIPRAP_ML_BASE_URL", "").rstrip("/") +_API_KEY = os.environ.get("RIPRAP_ML_API_KEY", "") +_DEFAULT_TIMEOUT = float(os.environ.get("RIPRAP_ML_TIMEOUT_S", "60")) + + +class RemoteUnreachable(RuntimeError): + """Raised when the remote inference service is unconfigured, down, + times out, or returns 5xx. Callers catch this to fall through to a + local model load. 4xx errors propagate as the generic exception so + a caller bug doesn't get masked by a "fallback to local" path.""" + + +def remote_enabled() -> bool: + """True iff the router is configured to attempt remote calls. 
+ Returns False under explicit `local` mode or when the base URL is + empty (the auto-default with no env config).""" + if _BACKEND == "local": + return False + if not _BASE_URL: + return False + return True + + +def _client(timeout: float | None = None) -> httpx.Client: + headers = {"User-Agent": "riprap-app/0.4.5"} + if _API_KEY: + headers["Authorization"] = f"Bearer {_API_KEY}" + return httpx.Client( + base_url=_BASE_URL, + headers=headers, + timeout=timeout if timeout is not None else _DEFAULT_TIMEOUT, + ) + + +def _post(path: str, payload: dict[str, Any], timeout: float | None = None) -> dict: + """POST {payload} as JSON to the remote service's `path`. Returns the + parsed JSON body. Raises RemoteUnreachable on transport errors; + raises HTTPStatusError on 4xx so caller bugs surface.""" + if not remote_enabled(): + raise RemoteUnreachable("remote ML backend not configured " + "(RIPRAP_ML_BASE_URL empty or BACKEND=local)") + t0 = time.monotonic() + try: + with _client(timeout) as c: + r = c.post(path, json=payload) + except (httpx.ConnectError, httpx.ReadError, httpx.WriteError, + httpx.TimeoutException, httpx.RemoteProtocolError) as e: + raise RemoteUnreachable(f"{type(e).__name__}: {e}") from e + if r.status_code >= 500: + raise RemoteUnreachable(f"HTTP {r.status_code} from {path}: {r.text[:200]}") + r.raise_for_status() + duration_s = time.monotonic() - t0 + # Hardware: msradam/riprap-vllm runs on NVIDIA L4. Operators can + # override via RIPRAP_HARDWARE_LABEL. The proxy reports per-call + # GPU energy off NVML in the X-GPU-Energy-J / X-GPU-Power-W headers + # — read those for a real measurement instead of the data-sheet + # estimate when present. + override = (os.environ.get("RIPRAP_HARDWARE_LABEL") or "").lower() + if "mi300x" in override or "amd" in override: + hw = "amd_mi300x" + elif "t4" in override: + hw = "nvidia_t4" + else: + hw = "nvidia_l4" + joules_real, power_w_real = _parse_gpu_headers(r.headers) + emissions.active().record_ml( + endpoint=path, + backend="riprap-models", + hardware=hw, + duration_s=duration_s, + joules_real=joules_real, + power_w_real=power_w_real, + ) + return r.json() + + +def _parse_gpu_headers(headers) -> tuple[float | None, float | None]: + """Pull (joules, watts) from X-GPU-Energy-J / X-GPU-Power-W if the + proxy attached them. Returns (None, None) if the headers are absent + (older proxy build, NVML init failed, or the call streamed).""" + def _f(name: str) -> float | None: + v = headers.get(name) + if v is None or v == "": + return None + try: + return float(v) + except ValueError: + return None + return _f("x-gpu-energy-j"), _f("x-gpu-power-w") + + +def _serialize_array(arr) -> str: + """numpy/torch tensor → base64-encoded float32 raw bytes for transport. + Each remote handler decodes to (shape, dtype=float32) and reconstructs. + Reasonable round-trip for chips up to a few MB; large rasters should + use compressed numpy-savez instead — TODO when a model needs > 8 MB.""" + import numpy as np + np_arr = arr if isinstance(arr, np.ndarray) else _to_numpy(arr) + np_arr = np_arr.astype("float32", copy=False) + return base64.b64encode(np_arr.tobytes()).decode("ascii") + + +def _to_numpy(t): + """Best-effort tensor → numpy. 
Accepts torch.Tensor or numpy already.""" + try: + import torch + if isinstance(t, torch.Tensor): + return t.detach().cpu().numpy() + except ImportError: + pass + import numpy as np + return np.asarray(t) + + +def _deserialize_array(b64: str, shape: list[int]): + """Inverse of _serialize_array — bytes → numpy float32 with given shape.""" + import numpy as np + raw = base64.b64decode(b64) + return np.frombuffer(raw, dtype="float32").reshape(shape) + + +# ---- Public router entry points ------------------------------------------- + +def healthcheck(timeout: float = 3.0) -> bool: + """Quick reachability probe. True if the service responds 200 to GET + /healthz within `timeout` seconds. Used by /api/backend so the UI can + show whether the remote ML backend is currently live.""" + if not remote_enabled(): + return False + try: + with _client(timeout) as c: + r = c.get("/healthz") + return r.status_code == 200 + except Exception: + return False + + +def backend_info() -> dict[str, Any]: + """Snapshot for /api/backend — what the UI should advertise.""" + return { + "backend": _BACKEND, + "base_url": _BASE_URL or None, + "remote_enabled": remote_enabled(), + "reachable": healthcheck() if remote_enabled() else False, + } + + +def prithvi_pluvial(s2_chip, *, scene_id: str | None = None, + scene_datetime: str | None = None, + cloud_cover: float | None = None, + timeout: float | None = None) -> dict[str, Any]: + """Remote forward pass through Prithvi-NYC-Pluvial v2. + Input: 6-band Sentinel-2 chip (numpy or torch, shape [6, H, W]). + Output: { ok, pct_water_within_500m, pct_water_full, scene_id, ... }. + Raises RemoteUnreachable if the service is down.""" + arr = _to_numpy(s2_chip) + return _post("/v1/prithvi-pluvial", { + "s2": _serialize_array(arr), + "shape": list(arr.shape), + "scene_id": scene_id, + "scene_datetime": scene_datetime, + "cloud_cover": cloud_cover, + }, timeout=timeout) + + +def terramind(adapter: str, s2l2a=None, s1rtc=None, dem=None, *, + timeout: float | None = None) -> dict[str, Any]: + """Remote forward through TerraMind-NYC-Adapters (LULC or Buildings) + or the v1 base generative path (synthesis). `adapter` is one of: + lulc, buildings, synthesis. Each modality is a numpy array, torch + tensor, or None — `synthesis` only needs DEM; the LoRA adapters + need at minimum S2L2A.""" + payload: dict[str, Any] = {"adapter": adapter} + if s2l2a is not None: + s2_np = _to_numpy(s2l2a) + payload["s2"] = _serialize_array(s2_np) + payload["s2_shape"] = list(s2_np.shape) + if s1rtc is not None: + s1_np = _to_numpy(s1rtc) + payload["s1"] = _serialize_array(s1_np) + payload["s1_shape"] = list(s1_np.shape) + if dem is not None: + dem_np = _to_numpy(dem) + payload["dem"] = _serialize_array(dem_np) + payload["dem_shape"] = list(dem_np.shape) + return _post("/v1/terramind", payload, timeout=timeout) + + +def ttm_forecast(model: str, history: Iterable[float], *, + context_length: int, prediction_length: int, + cadence: str = "h", + timeout: float | None = None) -> dict[str, Any]: + """Remote Granite TTM r2 forecast. + `model` is one of: zero_shot_battery, fine_tune_battery, weekly_311, + floodnet_recurrence — the service decides which checkpoint to use. + `history` is a 1-D iterable of floats (the time series); `cadence` + is for the service's labelling (h / d / w / 6m). 
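+    Illustrative call (values assumed from the Battery zero-shot window,
+    512 six-minute steps of context and a 96-step horizon):
+    ttm_forecast("zero_shot_battery", residuals, context_length=512,
+    prediction_length=96, cadence="6m").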
Output shape is
+    `{ ok, forecast: [...], peak_index, peak_value }`."""
+    series = list(map(float, history))
+    return _post("/v1/ttm-forecast", {
+        "model": model,
+        "history": series,
+        "context_length": context_length,
+        "prediction_length": prediction_length,
+        "cadence": cadence,
+    }, timeout=timeout)
+
+
+def granite_embed(texts: list[str], *,
+                  timeout: float | None = None) -> dict[str, Any]:
+    """Remote Granite Embedding 278M batch encode.
+    Output: { ok, vectors: [[float, ...], ...] }. Vector dimension fixed
+    at 768 (granite-embedding-278m-multilingual)."""
+    return _post("/v1/granite-embed", {"texts": list(texts)}, timeout=timeout)
+
+
+def gliner_extract(text: str, labels: list[str], *,
+                   timeout: float | None = None) -> dict[str, Any]:
+    """Remote GLiNER typed-entity extraction.
+    Output: { ok, entities: [{label, text, start, end, score}, ...] }."""
+    return _post("/v1/gliner-extract", {
+        "text": text, "labels": list(labels),
+    }, timeout=timeout)
diff --git a/app/intents/__init__.py b/app/intents/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec366de7749d402cf5de3ed4450ba4358e73fb2f
--- /dev/null
+++ b/app/intents/__init__.py
@@ -0,0 +1,3 @@
+"""Per-intent execution modules. Each intent knows how to take a planner
+Plan and run only the specialists relevant to it, returning a
+reconciler-ready set of documents and a paragraph."""
diff --git a/app/intents/development_check.py b/app/intents/development_check.py
new file mode 100644
index 0000000000000000000000000000000000000000..214515a23adafdf4c288cfd56291ba3dfaca9a54
--- /dev/null
+++ b/app/intents/development_check.py
@@ -0,0 +1,328 @@
+"""development_check intent — "what are they building in <neighborhood> and is it risky?"
+
+Pipeline:
+  1. Resolve target text → NTA polygon
+  2. Pull active DOB construction permits (NB / A1 / DM, last ~18 mo)
+     inside the polygon
+  3. Cross-reference each permit with the Sandy + DEP scenarios already
+     loaded in memory
+  4. Aggregate counts; rank flagged projects by severity
+  5. Reconcile via Granite 4.1 with a development-briefing prompt that
+     names specific projects and addresses
+"""
+from __future__ import annotations
+
+import logging
+import time
+from typing import Any
+
+from app import llm
+from app.areas import nta
+from app.context import dob_permits
+from app.rag import retrieve as rag_retrieve
+
+log = logging.getLogger("riprap.intent.development_check")
+
+# Reconciler model — see app/reconcile.py for the env-var contract.
+import os as _os  # noqa: E402
+
+OLLAMA_MODEL = _os.environ.get("RIPRAP_RECONCILER_MODEL",
+                               _os.environ.get("RIPRAP_OLLAMA_MODEL", "granite4.1:8b"))
+
+EXTRA_SYSTEM_PROMPT = """Write a flood-exposure briefing about active construction in an NYC neighborhood. Use ONLY the facts in the provided documents.
+
+Output this markdown skeleton verbatim, filling each `<...>` with content drawn only from the documents. After every numerical claim, append the document id in square brackets — e.g. ` [dob_permits]`. Bold at most one phrase per section using `**...**`. Omit any section whose supporting facts are absent from the documents.
+
+```
+**Status.**
+<1-2 sentences: total active major-construction projects and how many are flagged for flood exposure, citing [dob_permits]>.
+
+**Flagged projects.**
+- <address> (<BBL> [dob_permits]). <job type> issued <date>; owner <owner>. <flood exposure>.
+- <second flagged project, same format>
+- <third flagged project, same format>
+
+**Pattern.**
+<1-2 sentences observing which streets concentrate the flagged projects and the new-building / major-alteration mix from [dob_permits]>.
+
+**Policy context.**
+<1 sentence per RAG hit, citing the agency name and [rag_*]>.
+``` + +Constraints: +- Copy addresses, BBLs, dates, and owner names verbatim from the documents — no paraphrasing. +- If [dob_permits] reports 0 flagged projects, omit the **Flagged projects.** section and say so in **Status.**. +- If only [nta_resolve] is present and no [dob_permits], output exactly: `No grounded data available for this neighborhood.` +""" + + +def run(plan, query: str, progress_q=None, strict: bool = False) -> dict[str, Any]: # TODO(cleanup): cc-grade-D (27) + """Execute the development_check Plan. If progress_q is provided + (a queue.Queue), each finalized step record is put on it so a + streaming endpoint can render the trace live. + + strict=True routes through Mellea-validated reconciliation (rejection + sampling against four grounding requirements). Disables token + streaming — the briefing arrives in one shot after Mellea's loop + settles. Trace gains a `mellea_validate` row with rerolls + which + requirements passed. + """ + t0 = time.time() + trace: list[dict] = [] + + def _emit(r: dict): + if progress_q is not None: + progress_q.put({"kind": "step", **r}) + + target_text = next( + (t["text"] for t in plan.targets if t.get("type") in ("nta", "borough")), + None, + ) + rec = {"step": "nta_resolve", "started_at": t0, "ok": False} + trace.append(rec) + # Try the planner's target first; if it didn't pick one, fall back to + # scanning the raw query text for any known neighborhood/borough name. + matches = nta.resolve(target_text) if target_text else [] + if not matches: + log.info("planner gave no usable target (%r); scanning query %r", + target_text, query) + matches = nta.resolve_from_text(query) + if not matches: + rec["err"] = f"no NTA match in target={target_text!r} or query={query!r}" + rec["elapsed_s"] = round(time.time() - t0, 2) + return _empty(plan, query, trace, error=rec["err"]) + target = matches[0] + rec["ok"] = True + rec["result"] = {"nta_code": target["nta_code"], + "nta_name": target["nta_name"], + "borough": target["borough"], + "bbox": list(target["geometry"].bounds)} + rec["elapsed_s"] = round(time.time() - t0, 2) + _emit(rec) + + poly = target["geometry"] + docs: list[dict] = [] + permits_summary = None + rag_out: list = [] + + # ---- DOB permits ------------------------------------------------------ + p_t0 = time.time() + prec = {"step": "dob_permits_nta", "started_at": p_t0, "ok": False} + trace.append(prec) + try: + # top_n=5: 5 flagged projects in the doc context is the sweet spot — + # rich enough for a journalist briefing, cheap enough to stay under + # ~25 s reconcile on T4 with the 8b model. + permits_summary = dob_permits.summary_for_polygon(poly, top_n=5) + prec["ok"] = True + prec["result"] = { + "n_total": permits_summary["n_total"], + "n_in_sandy": permits_summary["n_in_sandy"], + "n_in_dep_any": permits_summary["n_in_dep_any"], + # Pin data so the UI can render permits the moment this step + # finishes, instead of waiting for the `final` event. 
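+            # (Pin shape assumed: a small lat/lon-plus-flags dict per
+            # permit, so this payload stays a few KB even in
+            # permit-dense NTAs.)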
+ "all_pins": permits_summary["all_pins"], + } + except Exception as e: + prec["err"] = str(e) + log.exception("dob_permits failed") + prec["elapsed_s"] = round(time.time() - p_t0, 2) + _emit(prec) + + # ---- RAG -------------------------------------------------------------- + if "rag" in plan.specialists: + r_t0 = time.time() + rrec = {"step": "rag_dev", "started_at": r_t0, "ok": False} + trace.append(rrec) + try: + q = (f"flood resilience new construction development {target['nta_name']} " + f"{target['borough']} hardening building code") + rag_out = rag_retrieve(q, k=2, min_score=0.50) + rrec["ok"] = True + rrec["result"] = {"hits": len(rag_out)} + except Exception as e: + rrec["err"] = str(e) + rrec["elapsed_s"] = round(time.time() - r_t0, 2) + _emit(rrec) + + # ---- documents -------------------------------------------------------- + docs.append(_doc("nta_resolve", [ + "Source: NYC DCP Neighborhood Tabulation Areas 2020.", + f"Target neighborhood: {target['nta_name']} (NTA {target['nta_code']}), " + f"in the borough of {target['borough']}.", + ])) + if permits_summary: + ps = permits_summary + body = [ + "Source: NYC DOB Permit Issuance (Socrata ipu4-2q9a), filtered to " + "active New Building / Major Alteration / Demolition jobs in the " + "trailing 18 months. Cross-referenced with NYC Sandy 2012 " + "inundation extent and 3 DEP Stormwater scenarios.", + f"Total active major-construction projects in {target['nta_name']}: " + f"{ps['n_total']}.", + f"Of these: {ps['n_in_sandy']} fall inside the 2012 Sandy " + f"inundation zone; {ps['n_in_dep_any']} fall inside at least one " + f"DEP Stormwater scenario; {ps['n_in_dep_severe']} fall in the " + f"deeper DEP bands (1-4 ft or >4 ft).", + ] + if ps.get("by_job_type"): + mix = "; ".join(f"{n} {k}" for k, n in ps["by_job_type"].items()) + body.append(f"Job-type mix: {mix}.") + for p in ps["flagged_top"]: + scen_str = (", ".join(p["dep_scenarios"]) or "none") + body.append( + f"- {p['address']}, {p['borough']} (BBL {p.get('bbl') or 'unknown'}). " + f"{p['job_type_label']}, permit issued {p['issuance_date']}, " + f"status {p['permit_status']}. " + f"Owner: {p.get('owner_business') or 'unknown'}. " + f"In Sandy zone: {p['in_sandy']}; in DEP scenarios: {scen_str}; " + f"max DEP depth class: {p['dep_max_class']}." + ) + docs.append(_doc("dob_permits", body)) + for h in rag_out: + docs.append(_doc(h["doc_id"], [ + f"Source: {h['citation']}, page {h.get('page', '')}.", + f"Retrieved passage (verbatim): {h['text']}", + ])) + + # ---- reconcile -------------------------------------------------------- + rec_t0 = time.time() + rec_step = {"step": "reconcile_development", "started_at": rec_t0, "ok": False} + trace.append(rec_step) + paragraph = "" + audit = {"raw": "", "dropped": []} + mellea_meta = None + if len(docs) <= 1: + paragraph = ("**Status.** No active construction permit data available " + f"for {target['nta_name']} [nta_resolve].") + audit = {"raw": paragraph, "dropped": []} + rec_step["ok"] = True + elif strict: + # Streaming Mellea path: tokens stream during each attempt; on + # validation failure we emit a mellea_attempt event and reroll. 
+ rec_step["step"] = "mellea_reconcile_development" + try: + from app.framing import augment_system_prompt + from app.mellea_validator import DEFAULT_LOOP_BUDGET, reconcile_strict_streaming + from app.reconcile import trim_docs_to_plan as _trim + docs = _trim(docs, set(plan.specialists or [])) + def _on_token(delta: str, attempt_idx: int): + if progress_q is not None: + progress_q.put({"kind": "token", "delta": delta, + "attempt": attempt_idx}) + def _on_attempt_end(attempt_idx, passed, failed): + if progress_q is not None: + progress_q.put({"kind": "mellea_attempt", + "attempt": attempt_idx, + "passed": passed, "failed": failed}) + framed_prompt = augment_system_prompt( + EXTRA_SYSTEM_PROMPT, query=query, intent=plan.intent, + ) + mres = reconcile_strict_streaming( + docs, framed_prompt, + user_prompt="Write the development briefing now.", + model=OLLAMA_MODEL, loop_budget=DEFAULT_LOOP_BUDGET, + on_token=_on_token if progress_q else None, + on_attempt_end=_on_attempt_end if progress_q else None, + ) + paragraph = mres["paragraph"] + audit = {"raw": paragraph, "dropped": []} + mellea_meta = { + "rerolls": mres["rerolls"], + "n_attempts": mres["n_attempts"], + "requirements_passed": mres["requirements_passed"], + "requirements_failed": mres["requirements_failed"], + "requirements_total": mres["requirements_total"], + "model": mres["model"], + "loop_budget": mres["loop_budget"], + } + rec_step["ok"] = True + rec_step["result"] = { + "rerolls": mellea_meta["rerolls"], + "passed": f"{len(mellea_meta['requirements_passed'])}/{mellea_meta['requirements_total']}", + "paragraph_chars": len(paragraph), + } + except Exception as e: + rec_step["err"] = str(e) + log.exception("Mellea-validated reconcile failed") + paragraph = "" + audit = {"raw": "", "dropped": []} + else: + def _on_token(delta: str): + if progress_q is not None: + progress_q.put({"kind": "token", "delta": delta}) + try: + paragraph, audit = _reconcile(docs, on_token=_on_token if progress_q else None) + rec_step["ok"] = True + rec_step["result"] = {"paragraph_chars": len(paragraph), + "dropped": len(audit["dropped"])} + except Exception as e: + rec_step["err"] = str(e) + log.exception("development reconcile failed") + rec_step["elapsed_s"] = round(time.time() - rec_t0, 2) + _emit(rec_step) + + target_safe = {k: v for k, v in target.items() if k != "geometry"} + target_safe["bbox"] = list(target["geometry"].bounds) + return { + "intent": "development_check", + "query": query, + "plan": { + "intent": plan.intent, + "targets": plan.targets, + "specialists": plan.specialists, + "rationale": plan.rationale, + }, + "target": target_safe, + "n_matches": len(matches), + "dob_summary": permits_summary, + "rag": rag_out, + "paragraph": paragraph, + "audit": audit, + "mellea": mellea_meta, + "trace": trace, + "total_s": round(time.time() - t0, 2), + } + + +def _doc(doc_id: str, body_lines: list[str]) -> dict: + return {"role": f"document {doc_id}", "content": "\n".join(body_lines)} + + +def _reconcile(docs: list[dict], on_token=None) -> tuple[str, dict]: + from app.reconcile import verify_paragraph + messages = docs + [ + {"role": "system", "content": EXTRA_SYSTEM_PROMPT}, + {"role": "user", "content": "Write the development briefing now."}, + ] + # num_ctx 6144 covers a typical dev_check prompt: system ~700 + nta + # doc + DOB body with 5 flagged projects ~3000 + RAG hits ~1000. + # 12288 was over-allocating KV cache — costly on T4. num_predict caps + # the briefing at ~600 tokens (4 sections + 5 bullet projects). 
+ OPTS = {"temperature": 0, "num_ctx": 6144, "num_predict": 600} + if on_token is None: + resp = llm.chat(model=OLLAMA_MODEL, messages=messages, options=OPTS) + raw = resp["message"]["content"].strip() + else: + chunks: list[str] = [] + for chunk in llm.chat(model=OLLAMA_MODEL, messages=messages, + stream=True, options=OPTS): + delta = (chunk.get("message") or {}).get("content") or "" + if delta: + chunks.append(delta) + on_token(delta) + raw = "".join(chunks).strip() + cleaned, dropped = verify_paragraph(raw, docs) + return cleaned, {"raw": raw, "dropped": dropped} + + +def _empty(plan, query, trace, error): + return { + "intent": "development_check", + "query": query, + "error": error, + "plan": {"intent": plan.intent, "targets": plan.targets, + "specialists": plan.specialists, "rationale": plan.rationale}, + "trace": trace, + "paragraph": f"Could not resolve target to an NTA: {error}", + } diff --git a/app/intents/live_now.py b/app/intents/live_now.py new file mode 100644 index 0000000000000000000000000000000000000000..3726482d20a9f7886f186d9d0940176a0b7a0db7 --- /dev/null +++ b/app/intents/live_now.py @@ -0,0 +1,239 @@ +"""live_now intent — only fire live specialists. No geocode, no static +historic/modeled layers. Reconciler emits a "right now" status note. + +Targets are usually `{"type": "nyc"}` for the whole city; if the user +named a specific borough we still query at the same gauges (NOAA only +has 3 NYC stations) and the same NWS forecast zones (the API takes a +lat/lon point — we use a borough centroid). +""" +from __future__ import annotations + +import logging +import time +from typing import Any + +from app import llm +from app.context import noaa_tides, nws_alerts, nws_obs +from app.live import ttm_forecast + +log = logging.getLogger("riprap.intent.live_now") + +import os as _os # noqa: E402 + +# live_now stays on the smaller model: short outputs, speed matters more. +OLLAMA_MODEL = _os.environ.get("RIPRAP_LIVE_MODEL", + _os.environ.get("RIPRAP_OLLAMA_MODEL", "granite4.1:3b")) + +# NWS API requires a lat/lon point; pick a representative one per borough. +BOROUGH_POINTS = { + "Manhattan": (40.7831, -73.9712), # Central Park + "Brooklyn": (40.6500, -73.9500), # Park Slope-ish + "Queens": (40.7282, -73.7949), # Forest Hills + "Bronx": (40.8448, -73.8648), # Fordham + "Staten Island": (40.5795, -74.1502), # central SI + "NYC": (40.7128, -74.0060), # Lower Manhattan default +} + + +EXTRA_SYSTEM_PROMPT = """Write a current-conditions flood briefing for NYC. Use ONLY the facts in the provided documents. + +Output this markdown skeleton verbatim, filling each `<...>` with content drawn only from the documents. After every numerical claim, append the document id in square brackets — e.g. ` [noaa_tides]`. Bold at most one phrase per section using `**...**`. Omit any section whose supporting facts are absent from the documents. + +``` +**Status.** +. + +**Live signals.** +<1-3 sentences citing each live signal that fired: NWS alerts from [nws_alerts], tide observation and residual from [noaa_tides], recent precipitation from [nws_obs], any TTM forecast peak from [ttm_forecast]>. +``` + +Constraints: +- Be brief — current-conditions reports are read in seconds. +- Copy numerical values verbatim from documents. Do not round. +- Do not invoke historic events (Sandy, Ida) — this is a now-only report. 
+- If every live document indicates calm, write only: `**Status.** No active flood-relevant signals at this time [live_target].` +""" + + +def run(plan, query: str, progress_q=None) -> dict[str, Any]: # TODO(cleanup): cc-grade-E (32) + t0 = time.time() + trace: list[dict] = [] + + def _emit(r: dict): + if progress_q is not None: + progress_q.put({"kind": "step", **r}) + + boro = next((t.get("text") for t in plan.targets if t.get("type") == "borough"), None) + if boro and boro in BOROUGH_POINTS: + lat, lon = BOROUGH_POINTS[boro] + place = boro + else: + lat, lon = BOROUGH_POINTS["NYC"] + place = "NYC" + + docs: list[dict] = [] + tides_out = alerts_out = obs_out = ttm_out = None + + if "noaa_tides" in plan.specialists: + tides_out = _run_step(trace, "noaa_tides", lambda: noaa_tides.summary_for_point(lat, lon), progress_q) + if "nws_alerts" in plan.specialists: + alerts_out = _run_step(trace, "nws_alerts", lambda: nws_alerts.summary_for_point(lat, lon), progress_q) + if "nws_obs" in plan.specialists: + obs_out = _run_step(trace, "nws_obs", lambda: nws_obs.summary_for_point(lat, lon), progress_q) + if "ttm_forecast" in plan.specialists: + ttm_out = _run_step(trace, "ttm_forecast", lambda: ttm_forecast.summary_for_point(lat, lon), progress_q) + + # ---- documents ---- + docs.append({"role": "document live_target", "content": + f"Source: planner. Live-conditions report for {place}. " + f"Coordinates used for NWS lookups: {lat:.4f}, {lon:.4f}."}) + + if tides_out and tides_out.get("observed_ft_mllw") is not None: + body = [ + f"Source: NOAA CO-OPS Tides & Currents. Station: {tides_out['station_name']} " + f"(NOAA {tides_out['station_id']}, {tides_out['distance_km']} km from {place}).", + f"Observation time: {tides_out.get('obs_time') or 'unknown'}.", + f"Observed water level: {tides_out['observed_ft_mllw']} ft above MLLW.", + ] + if tides_out.get("predicted_ft_mllw") is not None: + body.append(f"Astronomical tide prediction at the same instant: " + f"{tides_out['predicted_ft_mllw']} ft.") + if tides_out.get("residual_ft") is not None: + body.append(f"Residual (observed - predicted): {tides_out['residual_ft']} ft. " + f"Positive = surge component above tide; negative = setdown.") + docs.append(_doc("noaa_tides", body)) + + if alerts_out and alerts_out.get("n_active", 0) > 0: + body = [f"Source: NWS Public Alerts API. Active flood-relevant alerts: " + f"{alerts_out['n_active']}."] + for a in alerts_out["alerts"][:4]: + body.append( + f"- {a.get('event','?')} (severity: {a.get('severity','?')}, " + f"urgency: {a.get('urgency','?')}); expires {a.get('expires','')[:16]}; " + f"area: {(a.get('areaDesc') or '')[:120]}." + ) + if a.get("headline"): + body.append(f" Headline: {a['headline'][:240]}") + docs.append(_doc("nws_alerts", body)) + + if obs_out and (obs_out.get("precip_last_hour_mm") is not None + or obs_out.get("precip_last_6h_mm") is not None): + body = [ + f"Source: NWS Station Observations. 
Nearest ASOS: {obs_out['station_name']} " + f"({obs_out['station_id']}, {obs_out['distance_km']} km).", + f"Observation time: {obs_out.get('obs_time') or 'unknown'}.", + ] + if obs_out.get("precip_last_hour_mm") is not None: + body.append(f"Precipitation last 1 h: {obs_out['precip_last_hour_mm']} mm.") + if obs_out.get("precip_last_6h_mm") is not None: + body.append(f"Precipitation last 6 h: {obs_out['precip_last_6h_mm']} mm.") + docs.append(_doc("nws_obs", body)) + + if ttm_out and ttm_out.get("available") and ttm_out.get("interesting"): + docs.append(_doc("ttm_forecast", [ + "Source: Granite TimeSeries TTM r2 (Ekambaram et al. 2024).", + f"Forecast peak surge residual at {ttm_out['station_name']}: " + f"{ttm_out['forecast_peak_ft']} ft, expected " + f"{ttm_out['forecast_peak_minutes_ahead']} minutes from now.", + f"Recent peak |residual| in context window: " + f"{ttm_out['history_peak_abs_ft']} ft.", + ])) + + # ---- reconcile ---- + rec_t0 = time.time() + rec_step = {"step": "reconcile_live_now", "started_at": rec_t0, "ok": False} + trace.append(rec_step) + if not docs or len(docs) == 1: # only the live_target doc, no actual signals + paragraph = ("**Status.** **No active flood-relevant signals at this time** for " + f"{place} [live_target].") + audit = {"raw": paragraph, "dropped": []} + rec_step["ok"] = True + else: + def _on_token(delta: str): + if progress_q is not None: + progress_q.put({"kind": "token", "delta": delta}) + try: + from app.framing import augment_system_prompt + framed_prompt = augment_system_prompt( + EXTRA_SYSTEM_PROMPT, query=query, intent=plan.intent, + ) + paragraph, audit = _reconcile( + docs, on_token=_on_token if progress_q else None, + system_prompt=framed_prompt, + ) + rec_step["ok"] = True + except Exception as e: + paragraph = "Could not produce a live-conditions report." + audit = {"raw": "", "dropped": []} + rec_step["err"] = str(e) + rec_step["elapsed_s"] = round(time.time() - rec_t0, 2) + _emit(rec_step) + + return { + "intent": "live_now", + "query": query, + "place": place, + "plan": { + "intent": plan.intent, + "targets": plan.targets, + "specialists": plan.specialists, + "rationale": plan.rationale, + }, + "noaa_tides": tides_out, + "nws_alerts": alerts_out, + "nws_obs": obs_out, + "ttm_forecast": ttm_out, + "paragraph": paragraph, + "audit": audit, + "trace": trace, + "total_s": round(time.time() - t0, 2), + } + + +def _run_step(trace: list, name: str, fn, progress_q=None) -> Any: + t0 = time.time() + rec = {"step": name, "started_at": t0, "ok": False} + trace.append(rec) + try: + out = fn() + rec["ok"] = True + rec["result"] = {k: out.get(k) for k in list(out.keys())[:3]} if isinstance(out, dict) else None + return out + except Exception as e: + rec["err"] = str(e) + log.exception("%s failed", name) + return None + finally: + rec["elapsed_s"] = round(time.time() - t0, 2) + if progress_q is not None: + progress_q.put({"kind": "step", **rec}) + + +def _doc(doc_id: str, body_lines: list[str]) -> dict: + return {"role": f"document {doc_id}", "content": "\n".join(body_lines)} + + +def _reconcile(docs: list[dict], on_token=None, + system_prompt: str = EXTRA_SYSTEM_PROMPT) -> tuple[str, dict]: + from app.reconcile import verify_paragraph + messages = docs + [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": "Write the live-conditions briefing now."}, + ] + # live_now is the smallest intent: ~4 live docs, short briefing. + # num_predict 200 caps to a 2-section status note. 
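+    # Illustrative output at this cap (shape only; values come from docs):
+    #   **Status.** **Coastal Flood Advisory** in effect for Queens [nws_alerts].
+    #   **Live signals.** Observed residual at The Battery is 0.8 ft [noaa_tides].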
+ OPTS = {"temperature": 0, "num_ctx": 2048, "num_predict": 200} + if on_token is None: + resp = llm.chat(model=OLLAMA_MODEL, messages=messages, options=OPTS) + raw = resp["message"]["content"].strip() + else: + chunks: list[str] = [] + for chunk in llm.chat(model=OLLAMA_MODEL, messages=messages, + stream=True, options=OPTS): + delta = (chunk.get("message") or {}).get("content") or "" + if delta: + chunks.append(delta) + on_token(delta) + raw = "".join(chunks).strip() + cleaned, dropped = verify_paragraph(raw, docs) + return cleaned, {"raw": raw, "dropped": dropped} diff --git a/app/intents/neighborhood.py b/app/intents/neighborhood.py new file mode 100644 index 0000000000000000000000000000000000000000..a385c1caf50027108189799c41379e103fa8fcad --- /dev/null +++ b/app/intents/neighborhood.py @@ -0,0 +1,501 @@ +"""neighborhood intent — resolve target text to one or more NTA polygons, +then run polygon-level specialists and reconcile. + +The set of polygon-capable specialists is currently: + - sandy_inundation.coverage_for_polygon + - dep_stormwater.coverage_for_polygon (per scenario) + - nyc311.summary_for_polygon + - microtopo.microtopo_for_polygon + +Other specialists (FloodNet, Ida HWM, Prithvi) are still point-based; +in Phase 2 we'll add polygon support for them. For now, neighborhood +mode produces the four signals above + RAG, and the reconciler emits +a structurally-different briefing aimed at a place rather than an +address. +""" +from __future__ import annotations + +import logging +import time +from typing import Any + +from app import llm +from app.areas import nta +from app.context import microtopo, nyc311 +from app.flood_layers import dep_stormwater, sandy_inundation +from app.rag import retrieve as rag_retrieve +from app.reconcile import citations_from_docs + +log = logging.getLogger("riprap.intent.neighborhood") + +import os as _os # noqa: E402 + +OLLAMA_MODEL = _os.environ.get("RIPRAP_RECONCILER_MODEL", + _os.environ.get("RIPRAP_OLLAMA_MODEL", "granite4.1:8b")) + +EXTRA_SYSTEM_PROMPT = """Write a flood-exposure briefing for an NYC neighborhood. Use ONLY the facts in the provided documents. + +Output this markdown skeleton verbatim, filling each `<...>` with content drawn only from the documents. After every numerical claim, append the document id in square brackets — e.g. ` [sandy_nta]`. Bold at most one phrase per section using `**...**`. Omit any section whose supporting facts are absent from the documents. + +``` +**Status.** +. + +**Empirical evidence.** +<1-3 sentences citing observed flood evidence: Sandy coverage from [sandy_nta], 311 counts from [nyc311_nta], any FloodNet or HWM signals>. + +**Modeled scenarios.** +<1-2 sentences citing modeled flooding from [dep_*_nta] (fraction of polygon in each scenario) and terrain from [microtopo_nta] (median HAND, fraction of polygon with HAND below 1 m)>. + +**Policy context.** +<1 sentence per RAG hit, citing the agency name and [rag_*]>. +``` + +Constraints: +- Copy numerical values verbatim from documents. Do not round, paraphrase, or substitute. +- Speak about the place as a polygon (use phrases like "of the neighborhood" or "of the NTA"), not as an address. +- If only [nta_resolve] is present and no other documents, output exactly: `No grounded data available for this neighborhood.` +""" + + +def run(plan, query: str, progress_q=None, strict: bool = False) -> dict[str, Any]: # TODO(cleanup): cc-grade-F (73) + """Execute the planner's neighborhood Plan. 
+
+    Resolves all targets to NTAs, picks the largest matching NTA (or the
+    first if multiple are equally good), runs the polygon specialists, and
+    reconciles via Granite 4.1.
+
+    strict=True routes the reconciler through Mellea-validated rejection
+    sampling; tokens still stream during each attempt, and the client
+    clears its live buffer on each reroll.
+    """
+    t0 = time.time()
+    trace: list[dict] = []
+
+    def _emit(r: dict):
+        if progress_q is not None:
+            progress_q.put({"kind": "step", **r})
+
+    # Resolve targets to NTAs. Try the planner's pick first; if it gave no
+    # usable target, scan the raw query text for any known neighborhood name.
+    target_text = next(
+        (t["text"] for t in plan.targets if t.get("type") in ("nta", "borough")),
+        None,
+    )
+    rec = {"step": "nta_resolve", "started_at": t0, "ok": False}
+    trace.append(rec)
+    matches = nta.resolve(target_text) if target_text else []
+    if not matches:
+        matches = nta.resolve_from_text(query)
+    if not matches:
+        rec["err"] = f"no NTA match in target={target_text!r} or query={query!r}"
+        rec["elapsed_s"] = round(time.time() - t0, 2)
+        return _empty_result(plan, query, trace, error=rec["err"])
+    target = matches[0]
+    rec["ok"] = True
+    rec["result"] = {
+        "nta_code": target["nta_code"],
+        "nta_name": target["nta_name"],
+        "borough": target["borough"],
+        "n_matches": len(matches),
+        # Bbox lets the UI fly-to and render the polygon while the rest
+        # of the specialists are still running.
+        "bbox": list(target["geometry"].bounds),
+    }
+    rec["elapsed_s"] = round(time.time() - t0, 2)
+    _emit(rec)
+
+    poly = target["geometry"]
+    docs: list[dict] = []
+    sandy_out = None
+    dep_out = {}
+    nyc311_out = None
+    micro_out = None
+    rag_out = []
+    prithvi_live_out = None
+    terramind_out = None
+
+    # ---- sandy ----
+    if "sandy" in plan.specialists:
+        s_t0 = time.time()
+        srec = {"step": "sandy_nta", "started_at": s_t0, "ok": False}
+        trace.append(srec)
+        try:
+            sandy_out = sandy_inundation.coverage_for_polygon(poly)
+            srec["ok"] = True
+            srec["result"] = {"fraction": sandy_out["fraction"], "inside": sandy_out["inside"]}
+        except Exception as e:
+            srec["err"] = str(e)
+            log.exception("sandy polygon failed")
+        srec["elapsed_s"] = round(time.time() - s_t0, 2)
+        _emit(srec)
+
+    # ---- dep_stormwater ----
+    if "dep_stormwater" in plan.specialists:
+        for scen in ["dep_extreme_2080", "dep_moderate_2050", "dep_moderate_current"]:
+            d_t0 = time.time()
+            drec = {"step": f"{scen}_nta", "started_at": d_t0, "ok": False}
+            trace.append(drec)
+            try:
+                cov = dep_stormwater.coverage_for_polygon(poly, scen)
+                dep_out[scen] = cov
+                drec["ok"] = True
+                drec["result"] = {"fraction_any": cov["fraction_any"]}
+            except Exception as e:
+                drec["err"] = str(e)
+                log.exception("%s polygon failed", scen)
+            drec["elapsed_s"] = round(time.time() - d_t0, 2)
+            _emit(drec)
+
+    # ---- nyc311 ----
+    if "nyc311" in plan.specialists:
+        n_t0 = time.time()
+        nrec = {"step": "nyc311_nta", "started_at": n_t0, "ok": False}
+        trace.append(nrec)
+        try:
+            nyc311_out = nyc311.summary_for_polygon(poly, years=3)
+            nrec["ok"] = True
+            nrec["result"] = {"n": nyc311_out["n"]}
+        except Exception as e:
+            nrec["err"] = str(e)
+            log.exception("nyc311 polygon failed")
+        nrec["elapsed_s"] = round(time.time() - n_t0, 2)
+        _emit(nrec)
+
+    # ---- microtopo ----
+    if "microtopo" in plan.specialists:
+        m_t0 = time.time()
+        mrec = {"step": "microtopo_nta", "started_at": m_t0, "ok": False}
+        trace.append(mrec)
+        try:
+            micro_out = microtopo.microtopo_for_polygon(poly)
+            mrec["ok"] = micro_out is not None
+            mrec["result"] = {
+                "elev_median_m": (micro_out or {}).get("elev_median_m"),
"frac_hand_lt1": (micro_out or {}).get("frac_hand_lt1"), + } + except Exception as e: + mrec["err"] = str(e) + log.exception("microtopo polygon failed") + mrec["elapsed_s"] = round(time.time() - m_t0, 2) + _emit(mrec) + + # ---- Prithvi-EO live water mask (NTA centroid) ---- + # Polygon-scoped queries don't have a single point of interest, but + # the NTA centroid is a fair sampling point: the 5 km chip the + # specialist fetches comfortably covers any NTA. The reconciler + # gets an `[prithvi_live]` doc with the % water observed today, and + # the frontend gets a GeoJSON layer to paint over the NTA polygon. + try: + from app.flood_layers import prithvi_live as plive_mod + if plive_mod.ENABLE: + p_t0 = time.time() + prec = {"step": "prithvi_eo_live", "started_at": p_t0, "ok": False} + trace.append(prec) + centroid = poly.centroid + prithvi_live_out = plive_mod.fetch(centroid.y, centroid.x) + prec["ok"] = bool(prithvi_live_out and prithvi_live_out.get("ok")) + if prec["ok"]: + prec["result"] = { + "scene_date": (prithvi_live_out.get("item_datetime") or "")[:10], + "cloud_cover": prithvi_live_out.get("cloud_cover"), + "pct_water_5km": prithvi_live_out.get("pct_water_full"), + } + else: + prec["err"] = (prithvi_live_out or {}).get("err") \ + or (prithvi_live_out or {}).get("skipped") or "no observation" + prec["elapsed_s"] = round(time.time() - p_t0, 2) + _emit(prec) + except Exception as e: + log.exception("prithvi_live (neighborhood) failed") + prithvi_live_out = {"ok": False, "err": str(e)} + + # ---- TerraMind synthesis (NTA centroid) ---- + # Generative-prior tier — synthesized ESRI Land Cover from the + # local LiDAR DEM at the NTA centroid. Renders as dashed-outline + # polygons on the map alongside the polygon-aggregated specialists. + try: + from app.context import terramind_synthesis as tm_mod + if tm_mod.ENABLE: + t_t0 = time.time() + trec = {"step": "terramind_synthesis", "started_at": t_t0, "ok": False} + trace.append(trec) + centroid = poly.centroid + terramind_out = tm_mod.fetch(centroid.y, centroid.x) + trec["ok"] = bool(terramind_out and terramind_out.get("ok")) + if trec["ok"]: + trec["result"] = { + "tim_chain": terramind_out.get("tim_chain"), + "dominant_class": terramind_out.get("dominant_class_display") + or terramind_out.get("dominant_class"), + "dominant_pct": terramind_out.get("dominant_pct"), + "n_classes": terramind_out.get("n_classes_observed"), + } + else: + trec["err"] = (terramind_out or {}).get("err") \ + or (terramind_out or {}).get("skipped") or "no synthesis" + trec["elapsed_s"] = round(time.time() - t_t0, 2) + _emit(trec) + except Exception as e: + log.exception("terramind (neighborhood) failed") + terramind_out = {"ok": False, "err": str(e)} + + # ---- rag ---- + if "rag" in plan.specialists: + r_t0 = time.time() + rrec = {"step": "rag_nta", "started_at": r_t0, "ok": False} + trace.append(rrec) + try: + q = (f"flood exposure {target['nta_name']} {target['borough']} " + "vulnerability hardening mitigation") + rag_out = rag_retrieve(q, k=3, min_score=0.45) + rrec["ok"] = True + rrec["result"] = {"hits": len(rag_out)} + except Exception as e: + rrec["err"] = str(e) + log.exception("rag polygon failed") + rrec["elapsed_s"] = round(time.time() - r_t0, 2) + _emit(rrec) + + # ---- build documents ---- + docs.append(_doc("nta_resolve", [ + "Source: NYC DCP Neighborhood Tabulation Areas 2020.", + f"Target neighborhood: {target['nta_name']} (NTA {target['nta_code']}), " + f"in the borough of {target['borough']}.", + f"Community District: {target.get('cdta') or 
'unknown'}.", + ])) + if sandy_out and sandy_out["inside"]: + docs.append(_doc("sandy_nta", [ + "Source: NYC Sandy Inundation Zone (NYC OD 5xsi-dfpx).", + f"Fraction of {target['nta_name']} inside the 2012 inundation extent: " + f"{sandy_out['fraction'] * 100:.1f}%.", + f"Total NTA area: {sandy_out['polygon_area_m2']/1e6:.2f} km².", + ])) + for scen, cov in dep_out.items(): + if cov["fraction_any"] > 0: + cls = cov["fraction_class"] + docs.append(_doc(f"{scen}_nta", [ + f"Source: {cov['label']}.", + f"Fraction of {target['nta_name']} inside any modeled flooded area: " + f"{cov['fraction_any'] * 100:.1f}%.", + f"Of which: {cls.get(1, 0) * 100:.1f}% in nuisance band (>4 in to 1 ft), " + f"{cls.get(2, 0) * 100:.1f}% in 1-4 ft band, " + f"{cls.get(3, 0) * 100:.1f}% in >4 ft band.", + ])) + if nyc311_out and nyc311_out.get("n", 0) > 0: + body = [ + "Source: NYC 311 service requests (Socrata erm2-nwe9), aggregated inside the NTA polygon.", + f"Flood-related complaints in the last 3 years inside {target['nta_name']}: " + f"{nyc311_out['n']}.", + ] + if nyc311_out.get("by_descriptor"): + top = "; ".join(f"{k}: {v}" for k, v in list(nyc311_out["by_descriptor"].items())[:3]) + body.append(f"Top descriptors: {top}.") + docs.append(_doc("nyc311_nta", body)) + if micro_out and micro_out.get("n_cells", 0) > 0: + body = [ + "Source: USGS 3DEP DEM (precomputed citywide GeoTIFF) with derived HAND and TWI rasters; aggregated over NTA polygon.", + f"Polygon contains {micro_out['n_cells']} 30-m DEM cells.", + f"Median elevation: {micro_out['elev_median_m']} m; " + f"10th-percentile elevation: {micro_out['elev_p10_m']} m.", + ] + if micro_out.get("hand_median_m") is not None: + body.append( + f"Median HAND (Height Above Nearest Drainage): " + f"{micro_out['hand_median_m']} m. " + f"Fraction of polygon cells with HAND below 1 m " + f"(near-channel, water reaches at flood): " + f"{(micro_out.get('frac_hand_lt1') or 0) * 100:.1f}%." + ) + if micro_out.get("twi_median") is not None: + body.append( + f"Median TWI: {micro_out['twi_median']}. " + f"Fraction of polygon cells with TWI > 10 (saturation-prone): " + f"{(micro_out.get('frac_twi_gt10') or 0) * 100:.1f}%." + ) + docs.append(_doc("microtopo_nta", body)) + if prithvi_live_out and prithvi_live_out.get("ok"): + docs.append(_doc("prithvi_live", [ + "Source: Prithvi-EO 2.0 (Sen1Floods11 fine-tune) live " + "segmentation over a Sentinel-2 L2A scene from Microsoft " + f"Planetary Computer, sampled at the NTA centroid of " + f"{target['nta_name']}.", + f"Sentinel-2 scene id: {prithvi_live_out.get('item_id')}.", + f"Observation date: " + f"{(prithvi_live_out.get('item_datetime') or '')[:10]}.", + f"Cloud cover: {prithvi_live_out.get('cloud_cover', 0):.3f}%.", + f"% water across the 5 km chip around the centroid: " + f"{prithvi_live_out.get('pct_water_full', 0):.2f}.", + ])) + if terramind_out and terramind_out.get("ok"): + body = [ + "Source: TerraMind 1.0 base (IBM/ESA, Apache-2.0) any-to-any " + "generative foundation model. 
SYNTHETIC PRIOR — generated " + "categorical land-cover from the LiDAR DEM at the NTA " + f"centroid of {target['nta_name']}; not a measurement.", + f"Chain: {' -> '.join(terramind_out.get('tim_chain') or ['DEM','LULC_synthetic'])}.", + f"Diffusion steps: {terramind_out.get('diffusion_steps')}.", + f"Diffusion seed: {terramind_out.get('diffusion_seed')}.", + f"Dominant synthetic class: " + f"{terramind_out.get('dominant_class_display') or terramind_out.get('dominant_class')} " + f"at {terramind_out.get('dominant_pct', 0):.1f}% (tentative ESRI " + "Land Cover labels).", + ] + for label, pct in (terramind_out.get("class_fractions") or {}).items(): + body.append(f" - {label}: {pct:.1f}%") + body.append("Use 'TerraMind generated a plausible synthetic " + "land-cover prior' framing — never 'imaged' or " + "'reconstructed'.") + docs.append(_doc("terramind_synthetic", body)) + for h in rag_out: + docs.append(_doc(h["doc_id"], [ + f"Source: {h['citation']}, page {h.get('page', '')}.", + f"Retrieved passage (verbatim): {h['text']}", + ])) + + # ---- reconcile ---- + rec_t0 = time.time() + rec_step = {"step": "reconcile_neighborhood", "started_at": rec_t0, "ok": False} + trace.append(rec_step) + paragraph = "" + audit = {"raw": "", "dropped": []} + mellea_meta = None + if docs and strict: + rec_step["step"] = "mellea_reconcile_neighborhood" + try: + from app.framing import augment_system_prompt + from app.mellea_validator import DEFAULT_LOOP_BUDGET, reconcile_strict_streaming + from app.reconcile import trim_docs_to_plan as _trim + docs = _trim(docs, set(plan.specialists or [])) + def _on_token(delta: str, attempt_idx: int): + if progress_q is not None: + progress_q.put({"kind": "token", "delta": delta, + "attempt": attempt_idx}) + def _on_attempt_end(attempt_idx, passed, failed): + if progress_q is not None: + progress_q.put({"kind": "mellea_attempt", + "attempt": attempt_idx, + "passed": passed, "failed": failed}) + framed_prompt = augment_system_prompt( + EXTRA_SYSTEM_PROMPT, query=query, intent=plan.intent, + ) + mres = reconcile_strict_streaming( + docs, framed_prompt, + user_prompt="Write the cited briefing now.", + model=OLLAMA_MODEL, loop_budget=DEFAULT_LOOP_BUDGET, + on_token=_on_token if progress_q else None, + on_attempt_end=_on_attempt_end if progress_q else None, + ) + paragraph = mres["paragraph"] + audit = {"raw": paragraph, "dropped": []} + mellea_meta = { + "rerolls": mres["rerolls"], + "n_attempts": mres["n_attempts"], + "requirements_passed": mres["requirements_passed"], + "requirements_failed": mres["requirements_failed"], + "requirements_total": mres["requirements_total"], + "model": mres["model"], "loop_budget": mres["loop_budget"], + } + rec_step["ok"] = True + rec_step["result"] = { + "rerolls": mellea_meta["rerolls"], + "passed": f"{len(mellea_meta['requirements_passed'])}/{mellea_meta['requirements_total']}", + "paragraph_chars": len(paragraph), + } + except Exception as e: + rec_step["err"] = str(e) + log.exception("Mellea-validated reconcile failed") + elif docs: + def _on_token(delta: str): + if progress_q is not None: + progress_q.put({"kind": "token", "delta": delta}) + try: + paragraph, audit = _reconcile(docs, on_token=_on_token if progress_q else None) + rec_step["ok"] = True + rec_step["result"] = {"paragraph_chars": len(paragraph), + "dropped": len(audit["dropped"])} + except Exception as e: + rec_step["err"] = str(e) + log.exception("neighborhood reconcile failed") + else: + paragraph = "No grounded data available for this neighborhood." 
+ rec_step["ok"] = True + rec_step["result"] = {"paragraph_chars": len(paragraph)} + rec_step["elapsed_s"] = round(time.time() - rec_t0, 2) + _emit(rec_step) + + cite_list = citations_from_docs(docs) + + target_safe = {k: v for k, v in target.items() if k != "geometry"} + target_safe["bbox"] = list(target["geometry"].bounds) # [minx, miny, maxx, maxy] + return { + "intent": "neighborhood", + "query": query, + "plan": { + "intent": plan.intent, + "targets": plan.targets, + "specialists": plan.specialists, + "rationale": plan.rationale, + }, + "target": target_safe, + "n_matches": len(matches), + "sandy_nta": sandy_out, + "dep_nta": dep_out, + "nyc311_nta": nyc311_out, + "microtopo_nta": micro_out, + "prithvi_live": prithvi_live_out, + "terramind": terramind_out, + "rag": rag_out, + "paragraph": paragraph, + "audit": audit, + "mellea": mellea_meta, + "citations": cite_list, + "trace": trace, + "total_s": round(time.time() - t0, 2), + } + + +def _doc(doc_id: str, body_lines: list[str]) -> dict: + return {"role": f"document {doc_id}", "content": "\n".join(body_lines)} + + +def _reconcile(docs: list[dict], on_token=None) -> tuple[str, dict]: + from app.reconcile import verify_paragraph + messages = docs + [ + {"role": "system", "content": EXTRA_SYSTEM_PROMPT}, + {"role": "user", "content": "Write the cited briefing now."}, + ] + # num_ctx 4096 covers our actual prompt (system ~600 + 6 docs ~2000) + # with margin; 8192 was over-allocating KV cache. num_predict caps the + # briefing at ~400 tokens — enough for 4 sections, no runaway. + OPTS = {"temperature": 0, "num_ctx": 4096, "num_predict": 600} + if on_token is None: + resp = llm.chat(model=OLLAMA_MODEL, messages=messages, options=OPTS) + raw = resp["message"]["content"].strip() + else: + chunks: list[str] = [] + for chunk in llm.chat(model=OLLAMA_MODEL, messages=messages, + stream=True, options=OPTS): + delta = (chunk.get("message") or {}).get("content") or "" + if delta: + chunks.append(delta) + on_token(delta) + raw = "".join(chunks).strip() + cleaned, dropped = verify_paragraph(raw, docs) + return cleaned, {"raw": raw, "dropped": dropped} + + +def _empty_result(plan, query: str, trace: list, error: str) -> dict: + return { + "intent": "neighborhood", + "query": query, + "error": error, + "plan": { + "intent": plan.intent, + "targets": plan.targets, + "specialists": plan.specialists, + "rationale": plan.rationale, + }, + "trace": trace, + "citations": [], + "paragraph": f"Could not resolve target to an NTA: {error}", + } diff --git a/app/intents/single_address.py b/app/intents/single_address.py new file mode 100644 index 0000000000000000000000000000000000000000..746747692c82909906e18c6bcc7f6d30b7de88b5 --- /dev/null +++ b/app/intents/single_address.py @@ -0,0 +1,101 @@ +"""single_address intent — the existing linear FSM, wrapped behind the +planner-aware execution interface. The planner's specialist list is +respected only as an OPT-OUT: if the planner explicitly omitted a +specialist we'd otherwise run, we skip it. 
The fixed FSM stays as the +canonical path because (a) it's well-tested, (b) order-of-execution +matters slightly (geocode before everything), and (c) the executor +parallelism for an address is bounded by Granite 4.1 reconcile time +anyway.""" +from __future__ import annotations + +import re + +from app.fsm import run as run_linear + +_ADDRESS_SHAPE = re.compile( + r"^\d+\s+[A-Z][\w\s\.\-']+(St|Street|Ave|Avenue|Rd|Road|Blvd|" + r"Boulevard|Pl|Place|Ln|Lane|Dr|Drive|Way|Ct|Court|Pkwy|" + r"Parkway|Sq|Square|Ter|Terrace|Hwy|Highway)\.?", + re.IGNORECASE, +) + + +def _looks_like_address(s: str) -> bool: + return bool(s and _ADDRESS_SHAPE.search(s)) + + +def run(plan, query: str, progress_q=None, strict: bool = False) -> dict: + """Execute the planner's single_address Plan via the existing linear + FSM. If progress_q is provided, FSM steps and Granite reconcile tokens + are forwarded to it for live streaming. + + strict=True flips the FSM's reconcile step to Mellea-validated + rejection sampling (via a thread-local flag). Disables token + streaming for that step.""" + from app.fsm import ( + iter_steps, + set_mellea_attempt_callback, + set_planned_specialists, + set_planner_intent, + set_strict_mode, + set_token_callback, + set_user_query, + ) + planner_addr = next( + (t["text"] for t in plan.targets if t.get("type") == "address"), + None, + ) + addr = planner_addr if _looks_like_address(planner_addr) else query + set_strict_mode(strict) + set_planned_specialists(plan.specialists or []) + set_user_query(query) + set_planner_intent(plan.intent) + if progress_q is not None: + def _on_token(delta: str, attempt_idx: int = 0): + # `attempt_idx` is the 0-based Mellea reroll index. The + # SvelteKit client treats a change in this value as a + # signal to clear the live briefing buffer (per + # web/sveltekit/src/lib/client/agentStream.ts:onAttemptStart). + # We surface it as a 1-based attempt counter so the chip + # in the UI reads "attempt N" naturally. + progress_q.put({"kind": "token", "delta": delta, + "attempt": attempt_idx + 1}) + def _on_mellea_attempt(attempt_idx, passed, failed): + progress_q.put({"kind": "mellea_attempt", + "attempt": attempt_idx, + "passed": passed, "failed": failed}) + # Streaming Mellea now emits tokens during each attempt — wire + # the token callback for both strict and non-strict paths. 
+ set_token_callback(_on_token) + set_mellea_attempt_callback(_on_mellea_attempt) + try: + final = None + for ev in iter_steps(addr): + if ev["kind"] == "step": + progress_q.put({"kind": "step", **ev}) + else: + final = ev + out = {**(final or {}), "trace": []} + finally: + set_token_callback(None) + set_mellea_attempt_callback(None) + set_strict_mode(False) + set_planned_specialists(None) + set_user_query(None) + set_planner_intent(None) + else: + try: + out = run_linear(addr) + finally: + set_strict_mode(False) + set_planned_specialists(None) + set_user_query(None) + set_planner_intent(None) + out["intent"] = "single_address" + out["plan"] = { + "intent": plan.intent, + "targets": plan.targets, + "specialists": plan.specialists, + "rationale": plan.rationale, + } + return out diff --git a/app/live/__init__.py b/app/live/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/app/live/floodnet_forecast.py b/app/live/floodnet_forecast.py new file mode 100644 index 0000000000000000000000000000000000000000..2f700e39ffda5eea0e3f63f2550e2174bb9496f4 --- /dev/null +++ b/app/live/floodnet_forecast.py @@ -0,0 +1,184 @@ +"""Granite TimeSeries TTM r2 forecast on FloodNet sensor flood events. + +This is the strongest single TTM win for the NYU CUSP audience. +FloodNet (CUSP/Brooklyn College, Charlie Mydlarz + Andrea Silverman) +operates the sensor network and publishes the historical events; they +do not publish per-sensor forecasts. Riprap producing a forecast on +FloodNet's own data is a genuine ecosystem-extension capability — and +unlike the surge / 311 forecasts, the audience explicitly cares about +this dataset. + +Architecture: +- Nearest FloodNet sensor to the queried address (reuse + `app.context.floodnet.sensors_near`). +- 512 days of binary daily-event history at that sensor (1 if any + labeled flood event started on that day, else 0). +- TTM r2 (512 → 96) reused from `app.live.ttm_forecast._load_model` — + *no new model class loaded into memory*. The existing 311 daily + forecaster has already paid this load cost. +- 96-day-ahead daily forecast → aggregated into 4-week and 12-week + expected counts so the briefing narration stays human-readable. + +Silence over confabulation: returns `available: False` with a +reason field on every failure path. Sensors with fewer than 5 +flood events in their entire history yield no forecast (the TTM +output on near-empty histories is dominated by quantization noise). + +Doc-id format: `floodnet_forecast_` so it's distinct +from the existing `[floodnet]` event-history doc. +""" +from __future__ import annotations + +import logging +from datetime import datetime, timedelta, timezone + +import numpy as np + +from app.context.floodnet import flood_events_for, sensors_near +from app.live.ttm_forecast import ( + _MODEL_LOAD_ERROR, + DAILY_CONTEXT, + DAILY_PREDICTION, + _run_ttm, +) + +log = logging.getLogger("riprap.floodnet_forecast") + +DOC_ID_PREFIX = "floodnet_forecast" +CITATION = ( + "FloodNet NYC ultrasonic depth sensors (api.floodnet.nyc) + " + "IBM Granite TimeSeries TTM r2 (Ekambaram et al. 2024, NeurIPS) " + "via granite-tsfm — daily flood-event recurrence forecast" +) + +# A sensor with <5 historical events in 512 days has too sparse a +# signal for TTM to produce a meaningful forecast. 
The model still
+# runs, but the output is dominated by quantization noise around
+# zero; emitting a doc from that state is exactly the kind of
+# pseudo-quantitative claim the four-tier discipline guards against.
+MIN_EVENTS_FOR_FORECAST = 5
+
+# Search radius for nearest-sensor lookup. Wider than the existing
+# `floodnet` specialist's 600 m (which scans for *all* sensors at
+# the address) — we just need *one* relevant sensor for the forecast.
+NEAREST_SENSOR_RADIUS_M = 1500


+def _haversine_m(lat1, lon1, lat2, lon2) -> float:
+    from math import asin, cos, radians, sin, sqrt
+    R = 6371000.0
+    p1, p2 = radians(lat1), radians(lat2)
+    dp = radians(lat2 - lat1); dl = radians(lon2 - lon1)
+    a = sin(dp / 2) ** 2 + cos(p1) * cos(p2) * sin(dl / 2) ** 2
+    return 2 * R * asin(sqrt(a))


+def _build_daily_event_series(
+    deployment_id: str, days: int
+) -> tuple[np.ndarray, list[str], int]:
+    """Pull flood events for one sensor over `days` days; return a
+    daily binary series (1 if ≥1 flood event started that day, 0
+    otherwise), the ISO day labels, and the total event count."""
+    since = datetime.now(timezone.utc) - timedelta(days=days + 2)
+    events = flood_events_for([deployment_id], since=since)
+    end = datetime.now(timezone.utc).date()
+    start = end - timedelta(days=days - 1)
+    by_day: dict[str, int] = {}
+    for e in events:
+        ds = (e.start_time or "")[:10]
+        if not ds:
+            continue
+        by_day[ds] = 1
+    series: list[int] = []
+    labels: list[str] = []
+    for i in range(days):
+        d = start + timedelta(days=i)
+        d_iso = d.isoformat()
+        labels.append(d_iso)
+        series.append(by_day.get(d_iso, 0))
+    return np.array(series, dtype=np.float32), labels, len(events)


+def summary_for_point(lat: float, lon: float) -> dict:
+    """Forecast flood-event recurrence at the nearest FloodNet sensor.
+
+    Returns a dict with `available: bool`. On success, includes the
+    sensor identity, history summary, and forecast aggregates.
+    """
+    try:
+        sensors = sensors_near(lat, lon, NEAREST_SENSOR_RADIUS_M)
+    except Exception as e:
+        log.warning("FloodNet sensor lookup failed: %r", e)
+        return {"available": False, "reason": "FloodNet API unreachable"}
+
+    if not sensors:
+        return {"available": False,
+                "reason": f"no FloodNet sensor within {NEAREST_SENSOR_RADIUS_M} m"}
+
+    # Closest by haversine. Some deployments have null geometry; skip those.
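+    # Sanity check for _haversine_m: one degree of latitude is ~111.2 km,
+    # so _haversine_m(40.0, -74.0, 41.0, -74.0) should return ~111_195.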
+ geo_sensors = [s for s in sensors if s.lat is not None and s.lon is not None] + if not geo_sensors: + return {"available": False, "reason": "nearest sensor has no geometry"} + nearest = min(geo_sensors, + key=lambda s: _haversine_m(lat, lon, s.lat, s.lon)) + distance_m = _haversine_m(lat, lon, nearest.lat, nearest.lon) + + try: + history, labels, total_events = _build_daily_event_series( + nearest.deployment_id, days=DAILY_CONTEXT + ) + except Exception as e: + log.warning("FloodNet history fetch failed for %s: %r", + nearest.deployment_id, e) + return {"available": False, "reason": "history fetch failed"} + + if total_events < MIN_EVENTS_FOR_FORECAST: + return { + "available": False, + "reason": (f"sensor has only {total_events} historical events " + f"(<{MIN_EVENTS_FOR_FORECAST}); forecast omitted"), + "sensor_id": nearest.deployment_id, + "sensor_name": nearest.name, + } + + forecast = _run_ttm(history, DAILY_CONTEXT, DAILY_PREDICTION) + if forecast is None: + return {"available": False, + "reason": _MODEL_LOAD_ERROR or "TTM inference failed"} + + fc = np.clip(forecast, 0, None) + fc28 = float(fc[:28].sum()) + fc_total = float(fc.sum()) + fc_peak_offset = int(fc.argmax()) + 1 + fc_peak_value = float(fc.max()) + + hist_total = int(history.sum()) + hist_recent_28d = float(history[-28:].sum()) + + # "Accelerating" if the next-28-days expected count materially + # exceeds the prior-28-days observed count. + accelerating = (hist_recent_28d > 0 + and fc28 > 1.5 * hist_recent_28d) + + return { + "available": True, + "doc_id": f"{DOC_ID_PREFIX}_{nearest.deployment_id}", + "sensor_id": nearest.deployment_id, + "sensor_name": nearest.name, + "sensor_street": nearest.street, + "sensor_borough": nearest.borough, + "sensor_lat": nearest.lat, + "sensor_lon": nearest.lon, + "distance_from_query_m": round(distance_m, 1), + "history_window_days": DAILY_CONTEXT, + "history_total_events": hist_total, + "history_recent_28d_events": int(hist_recent_28d), + "forecast_horizon_days": DAILY_PREDICTION, + "forecast_28d_expected_events": round(fc28, 2), + "forecast_total_horizon_events": round(fc_total, 2), + "forecast_peak_day_offset": fc_peak_offset, + "forecast_peak_day_value": round(fc_peak_value, 3), + "accelerating": accelerating, + "model": "granite-timeseries-ttm-r2", + "citation": CITATION, + } diff --git a/app/live/ttm_battery_surge.py b/app/live/ttm_battery_surge.py new file mode 100644 index 0000000000000000000000000000000000000000..32c66085dee2f44be845fd2df285e887f562d143 --- /dev/null +++ b/app/live/ttm_battery_surge.py @@ -0,0 +1,328 @@ +"""Granite TTM r2 — Battery 96 h surge nowcast (NYC fine-tune). + +Wraps the Apache-2.0 [`msradam/Granite-TTM-r2-Battery-Surge`](https://huggingface.co/msradam/Granite-TTM-r2-Battery-Surge) +fine-tune. Fetches the past 1024 hours (~43 days) of hourly verified +water level + harmonic tide predictions at NOAA station 8518750 (The +Battery), computes surge residual (observed − predicted), and forecasts +the next 96 hours. + +Distinct from `app.live.ttm_forecast` — that's the *zero-shot* TTM r2 +on 6-min cadence (~9.6 h horizon) at the closest of three NYC gauges. +This module is the *fine-tuned* model on hourly cadence (~4-day horizon) +at a single gauge (Battery only — see MODEL_CARD honest-limitations). + +Both nowcasts coexist in the FSM. The zero-shot is shorter-horizon and +covers every coastal NYC query; the fine-tuned is longer-horizon and +specialised to the Battery's storm-surge regime, which is the dominant +driver of NYC inundation. 
The reconciler frames each as a separate +forecast in the briefing. + +Gated by RIPRAP_TTM_BATTERY_SURGE_ENABLE — deployments without the +heavy ML deps (granite-tsfm / transformers) silently no-op via the +same skipped-result shape every other heavy specialist emits. +""" +from __future__ import annotations + +import logging +import os +import threading +import time +from datetime import datetime, timedelta +from typing import Any + +log = logging.getLogger("riprap.ttm_battery_surge") + +ENABLE = os.environ.get( + "RIPRAP_TTM_BATTERY_SURGE_ENABLE", "1" +).lower() in ("1", "true", "yes") +DEVICE = os.environ.get("RIPRAP_TTM_BATTERY_SURGE_DEVICE", "cpu") +REPO = "msradam/Granite-TTM-r2-Battery-Surge" + +DOC_ID = "ttm_battery" +CITATION = ( + "msradam/Granite-TTM-r2-Battery-Surge (Apache-2.0, fine-tune of " + "ibm-granite/granite-timeseries-ttm-r2). Trained on AMD Instinct " + "MI300X via AMD Developer Cloud. Test MAE 0.1091 m on held-out " + "2023-2024 windows (vs 0.1467 zero-shot, 0.1861 persistence)." +) + +# NOAA Battery (NY) — the canonical NYC storm-surge gauge. +STATION_ID = "8518750" +STATION_NAME = "The Battery, NY" +NOAA_API = "https://api.tidesandcurrents.noaa.gov/api/prod/datagetter" + +# TTM r2 1024-96-r2 backbone: 1024 hours of context, 96 hours of horizon. +CONTEXT_LENGTH = 1024 +PREDICTION_LENGTH = 96 + +# Doc emission gate: only cite the forecast if the predicted peak surge +# is meaningful (positive ≥0.3 m or negative ≤-0.3 m). On a calm day the +# model still runs but the reconciler sees no doc. +MIN_INTERESTING_RESIDUAL_M = float( + os.environ.get("RIPRAP_TTM_BATTERY_MIN_INTERESTING_M", "0.3")) + +_MODEL = None +_INIT_LOCK = threading.Lock() + + +def _has_required_deps() -> tuple[bool, str | None]: + missing: list[str] = [] + for name in ("tsfm_public", "huggingface_hub", "torch", "requests", + "pandas"): + try: + __import__(name) + except ImportError: + missing.append(name) + if missing: + return False, ", ".join(missing) + return True, None + + +_DEPS_OK, _DEPS_MISSING = _has_required_deps() + + +def _ensure_model(): + """Load the fine-tuned TTM r2 once and cache. Failure is sticky — + a downloaded-then-broken model leaves _MODEL=None so subsequent + fetches re-attempt rather than silently serving a half-built one.""" + global _MODEL + if _MODEL is not None: + return _MODEL + with _INIT_LOCK: + if _MODEL is not None: + return _MODEL + from huggingface_hub import snapshot_download + + # Force-import dispatched class names so the transformers lazy + # registry can resolve `PreTrainedModel` / `TinyTimeMixerForPrediction` + # under FSM worker threads. Same pattern as ttm_forecast._load_model. + from transformers import PreTrainedModel # noqa: F401 + from tsfm_public import TinyTimeMixerForPrediction + log.info("ttm_battery_surge: downloading %s", REPO) + local_dir = snapshot_download(REPO) + log.info("ttm_battery_surge: loading model from %s", local_dir) + model = TinyTimeMixerForPrediction.from_pretrained(local_dir).eval() + if DEVICE == "cuda": + try: + import torch + if torch.cuda.is_available(): + model = model.cuda() + except Exception: + log.exception("ttm_battery_surge: cuda move failed; " + "staying on CPU") + _MODEL = model + return _MODEL + + +def _fetch_chunk(start: datetime, end: datetime, product: str): + """Pull one ≤30-day chunk from the NOAA CO-OPS datagetter. + + Two products: `water_level` (verified, 6-min — we ask for hourly + via interval=h) and `predictions` (hourly harmonic tide). Both come + back in metres if `units=metric`. 
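+
+    Response shape the parser below expects (abridged; keys other than
+    "t" and "v" are ignored):
+        {"data": [{"t": "2024-01-01 00:00", "v": "1.234"}, ...]}
+        {"predictions": [{"t": "2024-01-01 00:00", "v": "1.101"}, ...]}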
+ """ + import pandas as pd + import requests + params = { + "station": STATION_ID, + "begin_date": start.strftime("%Y%m%d"), + "end_date": end.strftime("%Y%m%d"), + "product": product, + "datum": "MLLW", + "units": "metric", + "time_zone": "gmt", + "format": "json", + "application": "riprap-nyc", + "interval": "h", + } + resp = requests.get(NOAA_API, params=params, timeout=30) + resp.raise_for_status() + data = resp.json() + key = "data" if "data" in data else "predictions" + if key not in data: + return pd.DataFrame() + df = pd.DataFrame(data[key]) + if df.empty: + return df + df["timestamp"] = pd.to_datetime(df["t"]) + df["value"] = pd.to_numeric(df["v"], errors="coerce") + return df[["timestamp", "value"]].dropna() + + +def _fetch_battery_history(hours: int) -> Any: + """Pull the last `hours` hours of (water_level, predicted) at the + Battery and return a DataFrame with columns + `timestamp / water_level_m / predicted_m / surge_residual_m`.""" + import pandas as pd + + end_d = datetime.utcnow().replace(minute=0, second=0, microsecond=0) + n_days = max(1, hours // 24 + 3) # padding in case of NOAA gaps + + chunks_wl, chunks_pr = [], [] + cur = end_d - timedelta(days=n_days) + while cur < end_d: + nxt = min(cur + timedelta(days=30), end_d) + try: + chunks_wl.append(_fetch_chunk(cur, nxt, "water_level")) + chunks_pr.append(_fetch_chunk(cur, nxt, "predictions")) + except Exception as e: + log.warning("ttm_battery_surge: NOAA chunk %s..%s failed: %s", + cur.date(), nxt.date(), e) + cur = nxt + + wl = pd.concat(chunks_wl, ignore_index=True) if chunks_wl else pd.DataFrame() + pr = pd.concat(chunks_pr, ignore_index=True) if chunks_pr else pd.DataFrame() + if wl.empty or pr.empty: + return pd.DataFrame() + wl = wl.rename(columns={"value": "water_level_m"}) + pr = pr.rename(columns={"value": "predicted_m"}) + df = wl.merge(pr, on="timestamp", how="inner").sort_values("timestamp") + df["surge_residual_m"] = df["water_level_m"] - df["predicted_m"] + df = df.dropna(subset=["surge_residual_m"]) + if len(df) > hours: + df = df.iloc[-hours:].reset_index(drop=True) + return df + + +def _summarize(history_df, forecast_arr) -> dict[str, Any]: + """Build the public dict the FSM specialist hands to the reconciler. + + Includes both raw arrays (for downstream charts in the trace UI) + and human-readable scalars (peak / peak time / interesting flag) + that the reconciler can paraphrase without overshooting evidence. 
+ """ + import numpy as np + history_arr = history_df["surge_residual_m"].to_numpy() + history_recent = float(history_arr[-1]) if len(history_arr) else None + history_peak_abs = float(np.max(np.abs(history_arr))) if len(history_arr) else None + + fc = np.asarray(forecast_arr, dtype="float64").reshape(-1) + if fc.size == 0: + return {"available": False, "reason": "empty forecast"} + peak_idx = int(np.argmax(np.abs(fc))) + peak = float(fc[peak_idx]) + peak_h_ahead = peak_idx + 1 # hourly cadence; index 0 = +1 h + + last_ts = (history_df["timestamp"].iloc[-1] + if len(history_df) else datetime.utcnow()) + peak_time = last_ts + timedelta(hours=peak_h_ahead) + + interesting = bool(abs(peak) >= MIN_INTERESTING_RESIDUAL_M) + + return { + "available": True, + "interesting": interesting, + "model": REPO, + "station_id": STATION_ID, + "station_name": STATION_NAME, + "context_hours": int(len(history_arr)), + "horizon_hours": int(fc.size), + "history_recent_m": (round(history_recent, 3) + if history_recent is not None else None), + "history_peak_abs_m": (round(history_peak_abs, 3) + if history_peak_abs is not None else None), + "forecast_peak_m": round(peak, 3), + "forecast_peak_hours_ahead": peak_h_ahead, + "forecast_peak_time_utc": peak_time.isoformat(timespec="minutes"), + "forecast_array_m": [round(float(v), 4) for v in fc.tolist()], + } + + +def fetch(timeout_s: float = 60.0) -> dict[str, Any]: + """Run the specialist. Always returns a dict with at minimum + `{available: bool, reason | ...}`. Caller should treat + `available=False` as silence-over-confabulation.""" + if not ENABLE: + return {"available": False, + "reason": "RIPRAP_TTM_BATTERY_SURGE_ENABLE=0"} + + t0 = time.time() + try: + df = _fetch_battery_history(CONTEXT_LENGTH) + if len(df) < CONTEXT_LENGTH: + return {"available": False, + "reason": f"insufficient NOAA history: " + f"got {len(df)} hours, need {CONTEXT_LENGTH}"} + if time.time() - t0 > timeout_s: + return {"available": False, + "reason": "NOAA fetch exceeded budget"} + + residuals = df["surge_residual_m"].to_numpy().astype("float32") + + # v0.4.5 — try the MI300X service first. The remote handles its + # own model loading; if it's reachable we never need local + # tsfm_public, which lets the HF Space drop the granite-tsfm + # bake from the image. When the remote is configured but returns + # non-ok we surface the remote error rather than try a local + # load — the local code path can ModuleNotFoundError on transient + # transformers-registry races and that's a worse user signal. 
+ forecast = None + compute = "local" + remote_attempted = False + try: + from app import inference as _inf + if _inf.remote_enabled(): + remote_attempted = True + remote = _inf.ttm_forecast( + "fine_tune_battery", residuals.tolist(), + context_length=CONTEXT_LENGTH, + prediction_length=PREDICTION_LENGTH, + cadence="h", + timeout=timeout_s, + ) + if remote.get("ok"): + import numpy as np + forecast = np.asarray(remote["forecast"], dtype="float32") + compute = f"remote · {remote.get('device', 'gpu')}" + else: + return {"available": False, + "reason": f"remote ttm-forecast non-ok: " + f"{remote.get('error') or 'unknown'}", + "elapsed_s": round(time.time() - t0, 2)} + except _inf.RemoteUnreachable as e: + log.info("ttm_battery_surge: remote unreachable (%s); local", e) + except Exception as e: + log.exception("ttm_battery_surge: remote call failed") + if remote_attempted: + return {"available": False, + "reason": f"remote ttm-forecast error: " + f"{type(e).__name__}: {e}", + "elapsed_s": round(time.time() - t0, 2)} + + if forecast is None: + if not _DEPS_OK: + return {"available": False, + "reason": f"deps unavailable on this deployment: " + f"{_DEPS_MISSING}"} + import torch + model = _ensure_model() + past = torch.from_numpy(residuals).unsqueeze(0).unsqueeze(-1) + if DEVICE == "cuda": + try: + if torch.cuda.is_available(): + past = past.cuda() + except Exception: + log.exception("ttm_battery_surge: cuda move failed") + with torch.no_grad(): + out = model(past_values=past) + forecast = out.prediction_outputs.squeeze(-1).squeeze(0).cpu().numpy() + + result = _summarize(df, forecast) + result["compute"] = compute + result["elapsed_s"] = round(time.time() - t0, 2) + return result + except Exception as e: + log.exception("ttm_battery_surge fetch failed") + return {"available": False, + "reason": f"{type(e).__name__}: {e}", + "elapsed_s": round(time.time() - t0, 2)} + + +def warm(): + """Optional pre-load — amortizes the first-query model build cost.""" + if not ENABLE or not _DEPS_OK: + return + try: + _ensure_model() + except Exception: + log.exception("ttm_battery_surge: warm() failed") diff --git a/app/live/ttm_forecast.py b/app/live/ttm_forecast.py new file mode 100644 index 0000000000000000000000000000000000000000..01ebd472919f71d2a238a873e4d11c7e099ab79b --- /dev/null +++ b/app/live/ttm_forecast.py @@ -0,0 +1,429 @@ +"""Granite TimeSeries TTM r2 — short-horizon nowcast for the live tide +residual (storm surge / wind setup) at the NYC harbor entrance. + +Why TTM here, vs the existing live NOAA fetcher: +- The existing `noaa_tides` specialist returns a single 6-min snapshot: + observed, predicted, residual = observed - predicted. That's "right now." +- TTM forecasts the next ~9.6 hours of the *residual* — the meteorologic + component (surge + wind setup). NOAA already publishes the astronomical + tide; TTM tells us if the surge component is about to peak. +- This is the genuinely useful add: a nowcast of the part NOAA *doesn't* + predict. + +Architecture: ibm-granite/granite-timeseries-ttm-r2, ~1.5M params, +zero-shot multivariate (we use it univariate here on the residual +series). 512-step context @ 6-min cadence = ~51 h of history; +96-step horizon = ~9.6 h ahead. + +Citation: Ekambaram, V., et al. (2024). "Tiny Time Mixers (TTMs): +Fast Pre-trained Models for Enhanced Zero/Few-Shot Forecasting of +Multivariate Time Series." NeurIPS 2024. + +Gated emission: a doc is only added when the forecast peak residual +exceeds an absolute threshold (default 0.3 ft / 9 cm). 
On a calm day +the model still runs, but the reconciler sees no doc — silence over +confabulation. +""" +from __future__ import annotations + +import logging +from datetime import datetime, timedelta + +import httpx +import numpy as np + +log = logging.getLogger("riprap.ttm_forecast") + +DOC_ID = "ttm_forecast" +CITATION = ("IBM Granite TimeSeries TTM r2 (Ekambaram et al. 2024, NeurIPS); " + "ibm-granite/granite-timeseries-ttm-r2 via granite-tsfm") + +# Three NOAA stations covering NYC harbor + Long Island Sound + Bight. +# step_ttm_forecast picks the closest to the queried address (matches the +# existing nearest-gauge behaviour in step_noaa_tides). This means an +# inland-Queens query forecasts at Kings Point (Long Island Sound), a +# Coney Island query forecasts at Sandy Hook (Bight), and a Manhattan +# query forecasts at the Battery — each gauge characterises a different +# storm-surge regime. +STATIONS = [ + ("8518750", "The Battery, NY", 40.7006, -74.0142), + ("8516945", "Kings Point, NY", 40.8103, -73.7649), + ("8531680", "Sandy Hook, NJ", 40.4669, -74.0094), +] +NOAA_URL = "https://api.tidesandcurrents.noaa.gov/api/prod/datagetter" + +CONTEXT_LENGTH = 512 # ~51 h at 6-min cadence (surge forecast) +PREDICTION_LENGTH = 96 # ~9.6 h horizon (surge forecast) +MIN_INTERESTING_RESIDUAL_FT = 0.3 # ~9 cm — gate for doc emission + +# 311 daily-counts forecast — TTM r2's smallest pretrained config is +# 512 context which is awkward for weekly counts on a single address. +# Daily aggregation (512 days ≈ 17 months of complaint history) lets +# the model run natively at its standard resolution; we forecast the +# next 96 days (~3 months). +DAILY_CONTEXT = 512 +DAILY_PREDICTION = 96 +NYC_311_URL = "https://data.cityofnewyork.us/resource/erm2-nwe9.json" +NYC_311_FLOOD_DESCRIPTORS = ( + "Sewer Backup (Use Comments) (SA)", + "Catch Basin Clogged/Flooding (Use Comments) (SC)", + "Street Flooding (SJ)", + "Manhole Overflow (Use Comments) (SA1)", + "Flooding on Street", +) + + +# ---- Lazy-loaded model singleton ----------------------------------------- + +_MODELS: dict[tuple[int, int], object] = {} +_MODEL_LOAD_ERROR: str | None = None + + +def _load_model(context_length: int = CONTEXT_LENGTH, + prediction_length: int = PREDICTION_LENGTH): + """TTM r2 is configured per (context, prediction) length pair. Cache + by that pair so the surge forecaster (512→96) and the weekly 311 + forecaster (52→4) each get their own model handle on first use.""" + global _MODEL_LOAD_ERROR + key = (context_length, prediction_length) + if key in _MODELS: + return _MODELS[key] + if _MODEL_LOAD_ERROR is not None: + return None + try: + import torch # noqa: F401 + + # Force-import the registered class names BEFORE get_model so that + # transformers' lazy registry can resolve them by string. Without + # this, AutoModel-style dispatch raises + # ModuleNotFoundError("Could not import module 'PreTrainedModel'") + # under the FSM worker thread (the lazy import path races with + # other model loads). See web/main.py startup for the same + # pre-import on the main thread. 
+ from transformers import PreTrainedModel # noqa: F401 + from tsfm_public import TinyTimeMixerForPrediction # noqa: F401 + from tsfm_public.toolkit.get_model import get_model + m = get_model( + "ibm-granite/granite-timeseries-ttm-r2", + context_length=context_length, + prediction_length=prediction_length, + ) + m.eval() + _MODELS[key] = m + log.info("TTM r2 loaded (context=%d horizon=%d)", + context_length, prediction_length) + return m + except Exception as e: + _MODEL_LOAD_ERROR = repr(e) + log.exception("TTM model load failed; future calls will be skipped") + return None + + +# Closest-of-three station selection (mirrors app/context/noaa_tides.py). +def _haversine_km(lat1, lon1, lat2, lon2) -> float: + from math import asin, cos, radians, sin, sqrt + R = 6371.0 + p1, p2 = radians(lat1), radians(lat2) + dp = radians(lat2 - lat1); dl = radians(lon2 - lon1) + a = sin(dp / 2) ** 2 + cos(p1) * cos(p2) * sin(dl / 2) ** 2 + return 2 * R * asin(sqrt(a)) + + +def _nearest_station(lat: float, lon: float): + return min(STATIONS, key=lambda s: _haversine_km(lat, lon, s[2], s[3])) + + +# ---- NOAA history fetch -------------------------------------------------- + +def _fetch_noaa_series(begin_iso: str, end_iso: str, product: str, + station_id: str) -> dict: + """One-shot NOAA datagetter for a date range. Returns the JSON body.""" + r = httpx.get(NOAA_URL, params={ + "begin_date": begin_iso, "end_date": end_iso, + "station": station_id, "product": product, + "datum": "MLLW", "units": "english", "time_zone": "lst_ldt", + "format": "json", + }, timeout=15.0) + r.raise_for_status() + return r.json() + + +def _residual_series(station_id: str, + n_obs_needed: int = CONTEXT_LENGTH) -> tuple[np.ndarray, list[str]] | None: + """Build the recent residual series (observed - predicted) at 6-min + cadence, length CONTEXT_LENGTH. Returns (values_ft, timestamps_iso). + Returns None if NOAA refused, returned mismatched shapes, or the + series is too short.""" + # Fetch slightly more than we need to absorb the occasional missing + # 6-min sample; we'll trim to exact length below. + end = datetime.utcnow() + # NOAA recommends LST/LDT for time_zone matching across products + begin = end - timedelta(minutes=6 * (n_obs_needed + 50)) + fmt = "%Y%m%d %H:%M" + begin_s = begin.strftime(fmt) + end_s = end.strftime(fmt) + try: + obs_j = _fetch_noaa_series(begin_s, end_s, "water_level", station_id) + pred_j = _fetch_noaa_series(begin_s, end_s, "predictions", station_id) + except Exception as e: + log.warning("NOAA fetch failed: %r", e) + return None + obs_data = obs_j.get("data") or [] + pred_data = pred_j.get("predictions") or [] + if not obs_data or not pred_data: + return None + # Both products are 6-min cadence and share timestamps; align by t. 
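+    # Worked example: obs 2.31 ft and predicted 1.89 ft at the same "t"
+    # key give a residual of +0.42 ft, the meteorological (surge) part.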
+ obs_by_t = {row["t"]: float(row["v"]) for row in obs_data if row.get("v")} + pred_by_t = {row["t"]: float(row["v"]) for row in pred_data if row.get("v")} + common_ts = sorted(set(obs_by_t) & set(pred_by_t)) + if len(common_ts) < n_obs_needed: + log.warning("only %d aligned NOAA samples (need %d)", + len(common_ts), n_obs_needed) + return None + common_ts = common_ts[-n_obs_needed:] + residual = np.array([obs_by_t[t] - pred_by_t[t] for t in common_ts], + dtype=np.float32) + return residual, common_ts + + +# ---- Forecast -------------------------------------------------------------- + +def _run_ttm(history: np.ndarray, + context_length: int = CONTEXT_LENGTH, + prediction_length: int = PREDICTION_LENGTH, + cadence: str = "h") -> np.ndarray | None: + """Channel-wise standardize, run model, de-standardize. Returns a + `prediction_length`-step de-standardized forecast in input units. + + v0.4.5 — tries the MI300X riprap-models service first; falls back + to the local in-process model on RemoteUnreachable. The + standardize / de-standardize math is owned by THIS function so the + remote service stays a thin "given a series, give me a forecast" + contract. + """ + global _MODEL_LOAD_ERROR + mu = float(history.mean()) + sigma = float(history.std() + 1e-6) + normed = (history - mu) / sigma + + # Try remote first. When remote is configured we bias HARD toward it: + # if the remote returns non-ok we surface that error rather than + # silently falling through to a local model load (which on cpu-basic + # surfaces would 502 with a cryptic transformers-internal + # ModuleNotFoundError). Local fallback is only used when the remote + # is unreachable (transport-level), which is what a degraded droplet + # actually looks like. + remote_attempted = False + try: + from app import inference as _inf + if _inf.remote_enabled(): + remote_attempted = True + remote = _inf.ttm_forecast( + "zero_shot_battery", normed.tolist(), + context_length=context_length, + prediction_length=prediction_length, + cadence=cadence, + ) + if remote.get("ok"): + pred = np.asarray(remote["forecast"], dtype=np.float32) + return pred * sigma + mu + _MODEL_LOAD_ERROR = ( + f"remote ttm-forecast returned non-ok: {remote.get('error') or remote}" + ) + log.warning("TTM zero-shot: remote returned non-ok: %s", remote) + return None + except _inf.RemoteUnreachable as e: + log.info("TTM zero-shot: remote unreachable (%s); local fallback", e) + except Exception as e: + log.exception("TTM zero-shot remote call failed: %r", e) + if remote_attempted: + _MODEL_LOAD_ERROR = f"remote ttm-forecast errored: {type(e).__name__}: {e}" + return None + + # Local fallback (only reached when remote isn't configured or is + # unreachable at the transport level). 
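+    # Both the remote and the local path honor the same identity, so the
+    # caller always gets the series back in input units (feet here):
+    #   forecast = model((history - mu) / sigma) * sigma + mu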
+ try: + model = _load_model(context_length, prediction_length) + except Exception as e: + _MODEL_LOAD_ERROR = f"{type(e).__name__}: {e}" + log.exception("TTM model load raised: %r", e) + return None + if model is None: + return None + try: + import torch + except ImportError: + _MODEL_LOAD_ERROR = "torch not available on this deployment" + return None + x = torch.from_numpy(normed.astype(np.float32))[None, :, None] + try: + with torch.no_grad(): + out = model(past_values=x) + except Exception as e: + _MODEL_LOAD_ERROR = f"{type(e).__name__}: {e}" + log.exception("TTM inference failed: %r", e) + return None + pred = out.prediction_outputs[0, :, 0].cpu().numpy() + return pred * sigma + mu + + +def summary_for_point(lat: float, lon: float) -> dict: + """Surge forecast at the NOAA gauge nearest the queried address. + + Three gauges cover NYC: Battery (harbor entrance), Kings Point + (LI Sound), Sandy Hook (Bight). Surge regimes differ — Sandy 2012 + peaked at +14 ft at the Battery vs. lower at Kings Point because + the gauges respond to different forcing geometries. Picking the + closest gauge to the queried address makes the forecast + address-relevant rather than always city-wide. + """ + sid, sname, slat, slon = _nearest_station(lat, lon) + distance_km = round(_haversine_km(lat, lon, slat, slon), 1) + + series = _residual_series(sid) + if series is None: + return {"available": False, + "reason": "NOAA history fetch returned insufficient data", + "station_id": sid, "station_name": sname, + "distance_km": distance_km} + history, timestamps = series + forecast = _run_ttm(history, CONTEXT_LENGTH, PREDICTION_LENGTH) + if forecast is None: + return {"available": False, + "reason": _MODEL_LOAD_ERROR or "TTM inference failed", + "station_id": sid, "station_name": sname, + "distance_km": distance_km} + + history_peak = float(np.max(np.abs(history))) + fc_peak_idx = int(np.argmax(np.abs(forecast))) + fc_peak_ft = float(forecast[fc_peak_idx]) + fc_peak_minutes_ahead = (fc_peak_idx + 1) * 6 + fc_peak_time = datetime.utcnow() + timedelta(minutes=fc_peak_minutes_ahead) + + interesting = (abs(fc_peak_ft) >= MIN_INTERESTING_RESIDUAL_FT or + history_peak >= MIN_INTERESTING_RESIDUAL_FT) + + return { + "available": True, + "interesting": interesting, + "station_id": sid, + "station_name": sname, + "distance_km": distance_km, + "context_length": int(len(history)), + "horizon_steps": int(len(forecast)), + "history_peak_abs_ft": round(history_peak, 2), + "history_recent_ft": round(float(history[-1]), 2), + "forecast_peak_ft": round(fc_peak_ft, 2), + "forecast_peak_minutes_ahead": fc_peak_minutes_ahead, + "forecast_peak_time_utc": fc_peak_time.isoformat(timespec="minutes") + "Z", + "threshold_ft": MIN_INTERESTING_RESIDUAL_FT, + } + + +# ---- Per-address daily 311 flood-complaint forecast ---------------------- + +def _fetch_311_flood_daily(lat: float, lon: float, + radius_m: int = 200, + days: int = DAILY_CONTEXT, + ) -> tuple[np.ndarray, list[str]] | None: + """Pull `days` of daily flood-complaint counts within `radius_m` of + (lat, lon) from NYC OpenData. Returns (counts_array_length_days, + date_labels) or None on failure. 
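+    The SoQL $where combines three filters: the created_date window,
+    the flood-descriptor whitelist, and
+    within_circle(location, lat, lon, radius_m).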
Missing days are zero-filled.""" + from collections import defaultdict + from datetime import datetime as _dt + from datetime import timedelta as _td + end = _dt.utcnow().date() + start = end - _td(days=days + 1) + descs = " OR ".join(f"descriptor='{d}'" for d in NYC_311_FLOOD_DESCRIPTORS) + where = ( + f"created_date between '{start.isoformat()}T00:00:00' and " + f"'{end.isoformat()}T23:59:59' AND " + f"latitude IS NOT NULL AND longitude IS NOT NULL AND " + f"({descs}) AND " + f"within_circle(location, {lat}, {lon}, {radius_m})" + ) + try: + r = httpx.get(NYC_311_URL, + params={"$select": "created_date", + "$where": where, + "$limit": "50000"}, + timeout=20.0) + r.raise_for_status() + rows = r.json() + except Exception as e: + log.warning("311 flood fetch for TTM failed: %r", e) + return None + + counts: dict[str, int] = defaultdict(int) + for row in rows or []: + ds = (row.get("created_date") or "")[:10] + if not ds: + continue + counts[ds] += 1 + + series: list[int] = [] + labels: list[str] = [] + for i in range(days): + d = end - _td(days=days - 1 - i) + d_iso = d.isoformat() + labels.append(d_iso) + series.append(counts.get(d_iso, 0)) + return np.array(series, dtype=np.float32), labels + + +def weekly_311_forecast_for_point(lat: float, lon: float, + radius_m: int = 200) -> dict: + """TTM r2 zero-shot forecast on per-address daily 311 + flood-complaint counts. Despite the name — kept for FSM-call-site + stability — this now operates on daily resolution (TTM r2's + smallest native config is 512 context, awkward for weekly). + History: 512 days (~17 months); forecast: 96 days (~3 months). + Returns daily and weekly summaries so the reconciler narration + stays human-readable. + + Designed not to raise. Returns `available: False` with a reason + field on any failure path.""" + series = _fetch_311_flood_daily(lat, lon, radius_m=radius_m) + if series is None: + return {"available": False, "reason": "311 history fetch failed"} + history, labels = series + forecast = _run_ttm(history, DAILY_CONTEXT, DAILY_PREDICTION) + if forecast is None: + return {"available": False, + "reason": _MODEL_LOAD_ERROR or "TTM inference failed"} + + fc_clipped = np.clip(forecast, 0, None) + hist_total = int(history.sum()) + hist_mean_per_day = float(history.mean()) + hist_recent_mean_30d = float(history[-30:].mean()) + fc_total = float(fc_clipped.sum()) + fc_mean_per_day = float(fc_clipped.mean()) + fc_peak_day = float(fc_clipped.max()) + fc_peak_day_offset = int(fc_clipped.argmax()) + 1 + + # Aggregate to weekly equivalents for the briefing narration — + # readers think in weeks, not days. 
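+    # e.g. a hypothetical 0.43 complaints/day mean narrates below as
+    # ≈ 3.0/week (0.43 × 7).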
+ history_weekly_mean = hist_mean_per_day * 7 + forecast_weekly_mean = fc_mean_per_day * 7 + + accelerating = (hist_recent_mean_30d > 0 and + fc_mean_per_day > 1.5 * hist_recent_mean_30d) + + return { + "available": True, + "radius_m": radius_m, + "days_context": DAILY_CONTEXT, + "days_horizon": DAILY_PREDICTION, + "history_total_complaints": hist_total, + "history_mean_per_day": round(hist_mean_per_day, 3), + "history_recent_30d_mean": round(hist_recent_mean_30d, 3), + "history_weekly_equivalent": round(history_weekly_mean, 2), + "forecast_total_next_horizon": round(fc_total, 1), + "forecast_mean_per_day": round(fc_mean_per_day, 3), + "forecast_weekly_equivalent": round(forecast_weekly_mean, 2), + "forecast_peak_day": round(fc_peak_day, 2), + "forecast_peak_day_offset": fc_peak_day_offset, + "accelerating": accelerating, + "context_window_start": labels[0] if labels else None, + "context_window_end": labels[-1] if labels else None, + } diff --git a/app/llm.py b/app/llm.py new file mode 100644 index 0000000000000000000000000000000000000000..aee4c9ea3c2fd0353a5ee6eea1967c723d5b6f0f --- /dev/null +++ b/app/llm.py @@ -0,0 +1,466 @@ +"""LiteLLM-backed shim around the ollama.chat call surface. + +Single function `chat(model, messages, options, stream)` that returns the +same dict / iterator-of-dicts shape `ollama.chat` returns, so existing +call sites swap `import ollama` -> `from app import llm` with no other +changes. + +Backend selection (env): + RIPRAP_LLM_PRIMARY = "vllm" | "ollama" (default: ollama) + RIPRAP_LLM_BASE_URL = http://amd:8000/v1 (vllm only) + RIPRAP_LLM_API_KEY = (vllm only) + RIPRAP_LLM_FALLBACK = "ollama" | "" (default: "ollama" when + primary=vllm, else "") + OLLAMA_BASE_URL = http://host:11434 (ollama backend only) + +Model routing: callers may pass either Ollama tags ("granite4.1:8b") or +logical aliases ("granite-8b"). Mapped to: + vllm -> openai/granite-4.1-{3b,8b} on RIPRAP_LLM_BASE_URL + ollama -> ollama_chat/granite4.1:{3b,8b} on OLLAMA_BASE_URL + +When primary=vllm with fallback=ollama, the LiteLLM Router auto-fails +over to the local Ollama deployment if the AMD endpoint errors (timeout, +connection refused, 5xx). Existing call sites are unaware of the swap. +""" + +from __future__ import annotations + +import logging +import os +import time +from collections.abc import Iterator +from typing import Any + +import litellm +from litellm import Router + +from app import emissions + +log = logging.getLogger(__name__) + +litellm.suppress_debug_info = True +litellm.drop_params = True # silently drop unsupported params instead of erroring + +_VLLM_BASE = os.environ.get("RIPRAP_LLM_BASE_URL", "").rstrip("/") +_VLLM_KEY = os.environ.get("RIPRAP_LLM_API_KEY", "") or "EMPTY" +_PRIMARY = os.environ.get("RIPRAP_LLM_PRIMARY", "ollama").lower() +_FALLBACK = os.environ.get( + "RIPRAP_LLM_FALLBACK", + "ollama" if _PRIMARY == "vllm" else "", +).lower() + +_OLLAMA_BASE = os.environ.get( + "OLLAMA_BASE_URL", + os.environ.get("OLLAMA_HOST", "http://localhost:11434"), +) +if not _OLLAMA_BASE.startswith("http"): + _OLLAMA_BASE = "http://" + _OLLAMA_BASE + +# alias -> (vllm-served-name, ollama-tag) +# In our hackathon vLLM deployment only the 8B is served (one served-name +# per vLLM process and we don't want a second container). Override the +# 3B served-name with RIPRAP_LLM_VLLM_3B_NAME if you stand up a second +# vLLM serving the 3B and want the planner to hit it specifically. 
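+#
+# Hypothetical example: with RIPRAP_LLM_VLLM_3B_NAME=granite-4.1-3b set and
+# a second vLLM process serving that name, the "granite-3b" alias routes to
+# the dedicated 3B while "granite-8b" keeps hitting the default served name
+# below.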
+_VLLM_8B = os.environ.get("RIPRAP_LLM_VLLM_8B_NAME", "granite-4.1-8b") +_VLLM_3B = os.environ.get("RIPRAP_LLM_VLLM_3B_NAME", _VLLM_8B) +# Ollama tag overrides: HF Spaces' build disk fills past the threshold +# when both granite4.1:3b and granite4.1:8b are pulled alongside the +# Phase 1 / Phase 4 EO toolchain. Set RIPRAP_OLLAMA_3B_TAG=granite4.1:8b +# on disk-constrained deployments — the planner output is short, so +# the 8B-vs-3B difference is latency, not correctness. +# +# RIPRAP_OLLAMA_8B_TAG is also the cheapest knob for swapping quants +# without touching code: e.g. "granite4.1:8b-q3_K_M" gives ~1 GB of +# memory back vs the default Q4_K_M, at minor grounding-discipline cost +# (re-run the Hollis probe before committing — see CLAUDE.md). +_OLLAMA_3B_TAG = os.environ.get("RIPRAP_OLLAMA_3B_TAG", "granite4.1:3b") +_OLLAMA_8B_TAG = os.environ.get("RIPRAP_OLLAMA_8B_TAG", "granite4.1:8b") +_LOGICAL: dict[str, tuple[str, str]] = { + "granite-3b": (_VLLM_3B, _OLLAMA_3B_TAG), + "granite-8b": (_VLLM_8B, _OLLAMA_8B_TAG), +} +_OLLAMA_TO_LOGICAL = {v[1]: k for k, v in _LOGICAL.items()} +# Also accept the canonical hardcoded tag names so callers passing +# `granite4.1:3b` resolve to the alias even when the env override +# remapped that alias to `granite4.1:8b`. +_OLLAMA_TO_LOGICAL.setdefault("granite4.1:3b", "granite-3b") +_OLLAMA_TO_LOGICAL.setdefault("granite4.1:8b", "granite-8b") + + +def _build_router() -> Router: + model_list: list[dict[str, Any]] = [] + fallbacks: list[dict[str, list[str]]] = [] + use_vllm = _PRIMARY == "vllm" and bool(_VLLM_BASE) + + for alias, (vllm_name, ollama_tag) in _LOGICAL.items(): + if use_vllm: + model_list.append({ + "model_name": alias, + "litellm_params": { + "model": f"openai/{vllm_name}", + "api_base": _VLLM_BASE, + "api_key": _VLLM_KEY, + "timeout": 240, + "stream_timeout": 240, + }, + }) + if _FALLBACK == "ollama": + fb_alias = f"{alias}-ollama" + model_list.append({ + "model_name": fb_alias, + "litellm_params": { + "model": f"ollama_chat/{ollama_tag}", + "api_base": _OLLAMA_BASE, + "timeout": 240, + "stream_timeout": 240, + }, + }) + fallbacks.append({alias: [fb_alias]}) + else: + model_list.append({ + "model_name": alias, + "litellm_params": { + "model": f"ollama_chat/{ollama_tag}", + "api_base": _OLLAMA_BASE, + "timeout": 240, + "stream_timeout": 240, + }, + }) + + log.info("llm router primary=%s fallback=%s vllm_base=%s ollama_base=%s", + _PRIMARY, _FALLBACK or "", + _VLLM_BASE or "", _OLLAMA_BASE) + return Router( + model_list=model_list, + fallbacks=fallbacks, + num_retries=0, # Router fallback handles the failover; no point + # burning seconds re-hitting a dead endpoint. + timeout=240, + ) + + +_router = _build_router() + + +def _resolve_alias(model: str) -> str: + if model in _LOGICAL: + return model + if model in _OLLAMA_TO_LOGICAL: + return _OLLAMA_TO_LOGICAL[model] + return model # pass through; let the router report unknowns + + +def _opts_to_kwargs(options: dict | None) -> dict: + """Translate ollama-style options dict to LiteLLM kwargs. + + Ollama-only knobs (num_ctx) are forwarded via extra_body so that the + ollama_chat backend still receives them; OpenAI/vLLM ignores them + (litellm.drop_params=True). 
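+
+    A hedged example of the translation this performs:
+        {"temperature": 0, "num_predict": 512, "num_ctx": 8192}
+        -> {"temperature": 0, "max_tokens": 512,
+            "extra_body": {"num_ctx": 8192}}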
+ """ + kw: dict[str, Any] = {} + extra: dict[str, Any] = {} + if options: + if "temperature" in options: + kw["temperature"] = options["temperature"] + if "top_p" in options: + kw["top_p"] = options["top_p"] + if "num_predict" in options: + kw["max_tokens"] = options["num_predict"] + for k in ("num_ctx",): + if k in options: + extra[k] = options[k] + if extra: + kw["extra_body"] = extra + return kw + + +def _extract_documents(messages: list[dict]) -> list[dict]: + """Pull document-role messages into Granite's HF chat-template format. + + Ollama's Modelfile template recognizes `role: "document "` and + bundles the message into a block automatically. The HF + tokenizer chat template (used by vLLM) does *not* — it silently + drops non-standard roles. To make vLLM honor the same grounding + contract, we extract the documents into the chat-template kwarg + `documents=[{"doc_id": ..., "text": ...}]` while leaving the + original document-role messages in place so the Ollama backend + keeps working unchanged on the fallback path. + """ + docs: list[dict] = [] + for m in messages: + role = m.get("role", "") + if role.startswith("document "): + docs.append({ + "doc_id": role.split(" ", 1)[1], + "text": m.get("content", ""), + }) + return docs + + +# vLLM's Granite chat template emits citations as `[doc_id=foo]`; the rest +# of Riprap (Mellea checks, frontend chip rendering, citations regex) all +# expect the bare `[foo]` form that Ollama's Modelfile template produces. +# Normalize transparently so the two backends are interchangeable. +_CITE_NORMALIZE_RE = __import__("re").compile(r"\[doc_id=([A-Za-z0-9_]+)\]") + + +def _normalize_citations(text: str) -> str: + return _CITE_NORMALIZE_RE.sub(r"[\1]", text) + + +def _to_ollama_shape(resp) -> dict: + msg = resp.choices[0].message + content = _normalize_citations(msg.content or "") + return {"message": {"role": "assistant", "content": content}} + + +def _stream_to_ollama_shape(stream, *, on_done=None) -> Iterator[dict]: + accum: list[str] = [] + for chunk in stream: + try: + delta = chunk.choices[0].delta + content = getattr(delta, "content", None) or "" + except (IndexError, AttributeError): + content = "" + # Per-chunk normalize is safe: `[doc_id=X]` arrives as a single + # token sequence inside one chunk in practice, and the regex is + # idempotent / no-op on partial matches. + if content: + content = _normalize_citations(content) + accum.append(content) + yield {"message": {"role": "assistant", "content": content}} + if on_done is not None: + on_done("".join(accum)) + + +def _hardware_for(engine: str) -> str: + """Map the active LLM engine to an emissions.HARDWARE key. + + Operator override via RIPRAP_HARDWARE_LABEL is honored where it + matches a known key (mi300x / l4 / t4 / apple / cpu); otherwise: + - Remote vLLM/Ollama (RIPRAP_LLM_BASE_URL set) → NVIDIA L4. Both + Riprap inference Spaces (msradam/riprap-vllm + msradam/ + riprap-inference) run on L4. The MI300X droplet was retired + 2026-05-06. + - On a CPU/T4-tier HF Space (UI Space with no remote backend) → + T4. + - Otherwise local dev → Apple M-series.""" + override = (os.environ.get("RIPRAP_HARDWARE_LABEL") or "").lower() + if "mi300x" in override or "amd" in override: + return "amd_mi300x" + if "l4" in override: + return "nvidia_l4" + if "t4" in override: + return "nvidia_t4" + if "nvidia" in override: + return "nvidia_l4" + if "apple" in override or "m3" in override or "m4" in override: + return "apple_m" + if _VLLM_BASE: + # Any remote vLLM/Ollama backend currently lives on an L4 Space. 
+ return "nvidia_l4" + if os.environ.get("SPACE_ID") or os.environ.get("HF_SPACE_ID"): + return "nvidia_t4" + return "apple_m" + + +def _extract_usage(resp) -> tuple[int | None, int | None]: + """Pull (prompt_tokens, completion_tokens) from a LiteLLM response. + Returns (None, None) when usage isn't surfaced (some Ollama paths).""" + try: + u = getattr(resp, "usage", None) + if u is None and isinstance(resp, dict): + u = resp.get("usage") + if u is None: + return (None, None) + # LiteLLM's Usage is dict-like / pydantic — accept either shape. + get = (u.get if hasattr(u, "get") else lambda k, d=None: getattr(u, k, d)) + return (get("prompt_tokens"), get("completion_tokens")) + except Exception: # noqa: BLE001 — instrumentation must never throw + return (None, None) + + +def _power_url() -> str | None: + """Build the proxy's /v1/power URL from RIPRAP_LLM_BASE_URL. + Returns None if remote isn't configured.""" + if not _VLLM_BASE: + return None + base = _VLLM_BASE + # _VLLM_BASE looks like https://msradam-riprap-vllm.hf.space/v1 + # The proxy's /v1/power lives at the same /v1 prefix. + if base.endswith("/v1"): + return base + "/power" + return base.rstrip("/") + "/v1/power" + + +def _sample_gpu_power_w() -> float | None: + """Single GET to the proxy's /v1/power endpoint. Returns the + instantaneous reading in watts, or None if unreachable / NVML off.""" + url = _power_url() + if not url: + return None + try: + import httpx as _httpx + with _httpx.Client(timeout=2.0) as c: + r = c.get(url, headers={"Authorization": f"Bearer {_VLLM_KEY}"}) + if r.status_code != 200: + return None + data = r.json() + # Prefer the 1 s rolling average — smooths over the 100 ms sampler + # so a single mid-idle tick doesn't poison the bracket. + for k in ("power_w_avg_1s", "power_w", "power_w_avg_5s"): + v = data.get(k) + if isinstance(v, (int, float)) and v > 0: + return float(v) + except Exception: + return None + return None + + +def _record_llm(*, alias: str, messages: list[dict], duration_s: float, + resp=None, completion_text: str | None = None, + stream: bool = False, + avg_power_w: float | None = None) -> None: + """Record one llm.chat call into the active emissions tracker. + + For non-stream calls, we read prompt/completion tokens off the + LiteLLM response. For stream calls, the response is a generator — + we estimate tokens from concatenated assistant text and from a + char/4 estimate of the input messages. + + `avg_power_w`, when provided, comes from a real NVML read on the + inference proxy (bracketed before / after the call). The tracker + converts that to joules via `power × duration` and flags the row + `measured=True`. Estimates fall through to the data-sheet figure. 
+ """ + info = backend_info() + hardware = _hardware_for(info["engine"]) + backend = info["engine"] + prompt_tokens, completion_tokens = _extract_usage(resp) if resp is not None else (None, None) + if prompt_tokens is None: + prompt_chars = sum(len(m.get("content") or "") for m in messages) + prompt_tokens = emissions.estimate_completion_tokens( + " " * prompt_chars) if prompt_chars else None + if completion_tokens is None and completion_text is not None: + completion_tokens = emissions.estimate_completion_tokens(completion_text) + joules_real = (avg_power_w * duration_s + if avg_power_w is not None and duration_s > 0 else None) + emissions.active().record_llm( + model=alias, + backend=backend, + hardware=hardware, + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + duration_s=duration_s, + stream=stream, + joules_real=joules_real, + power_w_real=avg_power_w, + ) + + +def _default_hardware_label() -> str: + """Best-guess hardware label for the UI badge. + + Auto-detected from env. Operators can override with + RIPRAP_HARDWARE_LABEL (e.g. "NVIDIA L4" / "AMD MI300X" / + "NVIDIA T4" / "Apple M3 Pro"). + + Default when a remote vLLM/Ollama backend is configured is "NVIDIA + L4" — both Riprap inference Spaces (msradam/riprap-vllm, + msradam/riprap-inference) run on L4. The MI300X droplet was + decommissioned 2026-05-06; set RIPRAP_HARDWARE_LABEL=AMD MI300X + explicitly if redeploying to that hardware. + """ + if _PRIMARY == "vllm" and _VLLM_BASE: + return "NVIDIA L4" + if os.environ.get("SPACE_ID") or os.environ.get("HF_SPACE_ID"): + return "NVIDIA T4" + return "Local" + + +def backend_info() -> dict[str, Any]: + """Static description of the active LLM routing for the /api/backend + endpoint and the UI badge. Does not perform a network call; the + /api/backend handler does its own reachability ping.""" + primary_engine = "vLLM" if _PRIMARY == "vllm" and _VLLM_BASE else "Ollama" + fallback_engine = ( + "Ollama" if (_PRIMARY == "vllm" and _FALLBACK == "ollama") + else None + ) + return { + "primary": _PRIMARY if _VLLM_BASE or _PRIMARY != "vllm" else "ollama", + "engine": os.environ.get("RIPRAP_ENGINE_LABEL", primary_engine), + "hardware": os.environ.get("RIPRAP_HARDWARE_LABEL", + _default_hardware_label()), + "model": os.environ.get("RIPRAP_RECONCILER_MODEL", _OLLAMA_8B_TAG), + "vllm_base_url": _VLLM_BASE or None, + "ollama_base_url": _OLLAMA_BASE, + "fallback_engine": fallback_engine, + } + + +def chat(model: str, messages: list[dict], options: dict | None = None, + stream: bool = False, format: str | None = None): + """Drop-in replacement for ollama.chat with router-managed failover. + + Returns: + - stream=False: dict shaped like ollama's response + ({"message": {"role": "assistant", "content": "..."}}). + - stream=True: iterator yielding chunk dicts of the same shape. + + `format="json"` mirrors Ollama's JSON-mode forcing — translated to + OpenAI's response_format for vLLM, and passed through unchanged for + the Ollama backend. + """ + alias = _resolve_alias(model) + kwargs = _opts_to_kwargs(options) + docs = _extract_documents(messages) + if docs: + # Merge into extra_body so Granite's HF chat template (vLLM) + # picks them up. Ollama backend ignores extra_body and keeps + # using the role="document " messages already in `messages`. 
+        eb = kwargs.setdefault("extra_body", {})
+        eb["documents"] = docs
+        eb.setdefault("chat_template_kwargs", {})["documents"] = docs
+    if format == "json":
+        # OpenAI/vLLM path
+        kwargs["response_format"] = {"type": "json_object"}
+        # Ollama path (LiteLLM forwards this via extra_body for ollama_chat)
+        kwargs.setdefault("extra_body", {})["format"] = "json"
+    # Bracket the call with /v1/power samples so we get a real
+    # NVML-derived energy reading, not a data-sheet estimate. Each
+    # sample is a sub-100 ms GET; the proxy returns a 1 s rolling avg
+    # so a single tick of idleness doesn't poison the bracket.
+    p0 = _sample_gpu_power_w()
+    t0 = time.monotonic()
+    if stream:
+        s = _router.completion(model=alias, messages=messages,
+                               stream=True, **kwargs)
+
+        def _on_stream_done(full_text: str) -> None:
+            duration_s = time.monotonic() - t0
+            p1 = _sample_gpu_power_w()
+            avg = _avg_w(p0, p1)
+            _record_llm(alias=alias, messages=messages,
+                        duration_s=duration_s,
+                        completion_text=full_text, stream=True,
+                        avg_power_w=avg)
+
+        return _stream_to_ollama_shape(s, on_done=_on_stream_done)
+    resp = _router.completion(model=alias, messages=messages, **kwargs)
+    duration_s = time.monotonic() - t0
+    p1 = _sample_gpu_power_w()
+    avg = _avg_w(p0, p1)
+    _record_llm(alias=alias, messages=messages,
+                duration_s=duration_s, resp=resp, stream=False,
+                avg_power_w=avg)
+    return _to_ollama_shape(resp)
+
+
+def _avg_w(p0: float | None, p1: float | None) -> float | None:
+    """Mean of two power samples; falls back to whichever single sample
+    is available, or None if both failed."""
+    pair = [p for p in (p0, p1) if p is not None]
+    if not pair:
+        return None
+    return sum(pair) / len(pair)
diff --git a/app/mellea_validator.py b/app/mellea_validator.py
new file mode 100644
index 0000000000000000000000000000000000000000..e52fed155e199b7eda79f61a19402bec0426bb11
--- /dev/null
+++ b/app/mellea_validator.py
@@ -0,0 +1,504 @@
+"""Mellea-validated reconciliation for Riprap.
+
+Wraps the existing Granite-via-Ollama reconciliation in IBM Research's
+Mellea framework: typed output + programmatic post-conditions +
+rejection sampling. Replaces post-hoc sentence-dropping with
+"don't accept output until requirements pass."
+
+Streaming and rejection sampling are mutually exclusive — by the time
+we'd validate, the user has watched the bad output appear. Strict mode
+trades streaming for compliance; the UI shows a "validating" skeleton
+instead of token-by-token render.
+
+The four invariants ported from the parent project's mellea_probe:
+
+  1. no_invented_numbers — every number in output appears in source
+  2. no_placeholder_tokens — output never contains "[source]" or
+     raw markup
+  3. every_claim_cited — each numeric token has a [doc_id] citation
+     in the same sentence
+  4. referenced_doc_ids_exist — cited doc_ids ⊆ input doc_ids
+"""
+from __future__ import annotations
+
+import logging
+import os
+import re
+import time
+from typing import Any
+
+from mellea import start_session
+from mellea.stdlib.requirements import req, simple_validate
+from mellea.stdlib.sampling import RejectionSamplingStrategy
+
+from app import llm
+
+log = logging.getLogger("riprap.mellea")
+
+# Default reconciler model — same env-var contract as app/reconcile.py.
+DEFAULT_MODEL = os.environ.get(
+    "RIPRAP_RECONCILER_MODEL",
+    os.environ.get("RIPRAP_OLLAMA_MODEL", "granite4.1:8b"),
+)
+
+# Loop budget — try up to N samples before falling back to the last
+# candidate even if it didn't pass all requirements. Low ceiling so a
+# pathological case can't run away with latency.
+#
+# Override at process start with RIPRAP_MELLEA_MAX_ATTEMPTS. We default
+# to 2 on the local Ollama path (where each attempt is 30-90 s on the
+# Mac) and 3 on remote/vLLM (where attempts are seconds). This caps
+# worst-case demo latency without giving up the principal grounding
+# guarantee — the first-attempt pass rate on the curated probes is >85%.
+def _default_loop_budget() -> int:
+    try:
+        n = int(os.environ.get("RIPRAP_MELLEA_MAX_ATTEMPTS", "0"))
+        if n > 0:
+            return n
+    except ValueError:
+        pass
+    return 2 if os.environ.get("RIPRAP_LLM_PRIMARY", "ollama").lower() == "ollama" else 3
+
+
+DEFAULT_LOOP_BUDGET = _default_loop_budget()
+
+# Number tokens — \b enforces a word boundary so identifier codes like
+# QN1206, B12 (community board), or M14 (bus route) are skipped entirely.
+# Inside QN1206 there's no \b between any chars, so no submatch leaks.
+_NUM_RE = re.compile(r"\b-?\d[\d,]*(?:\.\d+)?\b")
+# Named group "id" is what _check_referenced_doc_ids_exist reads via
+# m.group("id") below.
+_CITE_RE = re.compile(r"\[(?P<id>[a-z][a-z0-9_]*)\]")
+# Same trivial-numbers list as the post-hoc verifier — well-known service
+# line numbers, single digits.
+_TRIVIAL_NUMS = {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "100",
+                 "311", "911", "211"}
+
+
+def _strip_markdown_for_check(text: str) -> str:
+    """Drop bold markers + citation tags so the numeric scan is clean."""
+    text = re.sub(r"\*\*([^*]+)\*\*", r"\1", text)
+    text = re.sub(r"\[[a-z0-9_]+\]", "", text, flags=re.I)
+    return text
+
+
+def _normalize_num(s: str) -> set[str]:
+    forms = {s}
+    no_comma = s.replace(",", "")
+    forms.add(no_comma)
+    if "." in no_comma:
+        forms.add(no_comma.rstrip("0").rstrip("."))
+    return {f for f in forms if f}
+
+
+def _haystack(doc_msgs: list[dict]) -> str:
+    return "\n".join(m.get("content", "") for m in doc_msgs)
+
+
+def _doc_ids(doc_msgs: list[dict]) -> set[str]:
+    """Each doc message has a role like "document <doc_id>"; extract the ids."""
+    out = set()
+    for m in doc_msgs:
+        role = m.get("role", "")
+        if role.startswith("document "):
+            out.add(role.split(" ", 1)[1].strip())
+    return out
+
+
+# --- the four invariants ---------------------------------------------------
+
+
+def _check_no_invented_numbers(doc_msgs: list[dict]):
+    haystack = _haystack(doc_msgs)
+    def _fn(text: str):
+        clean = _strip_markdown_for_check(text)
+        invented = []
+        for n in _NUM_RE.findall(clean):
+            if n in _TRIVIAL_NUMS:
+                continue
+            forms = _normalize_num(n)
+            if not any(f in haystack for f in forms):
+                invented.append(n)
+        return not invented  # pass = no invented numbers
+    return _fn
+
+
+def _check_no_placeholder_tokens():
+    def _fn(text: str):
+        bad = []
+        if "[source]" in text.lower():
+            bad.append("[source]")
+        # The two raw-markup literals checked here were lost to markup
+        # stripping in transit; "<document" / "</document" reconstruct
+        # the document-tag leakage this invariant guards against.
+        if "<document" in text.lower():
+            bad.append("<document>")
+        if "</document" in text.lower():
+            bad.append("</document>")
+        if "[doc_id]" in text:
+            # Model echoed the EXTRA_SYSTEM_PROMPT skeleton literally
+            bad.append("[doc_id]")
+        return not bad
+    return _fn
+
+
+def _check_every_claim_cited():
+    """Each non-trivial numeric token must have a [doc_id] somewhere in
+    the same sentence. Sentence boundaries are conservative: a period
+    followed by whitespace, or end of text. This matches how a reader
+    actually attributes claims — the citation can be anywhere in the
+    sentence, not just adjacent to the number."""
+    # Sentence end = `. ` or `.\n` or end-of-string. Question/exclamation
+    # marks rarely appear in these briefings; period is enough.
+    _SENT_END = re.compile(r"\.[\s)]|\.$")
+
+    def _sentence_span(text: str, pos: int) -> tuple[int, int]:
+        # Walk backwards to the previous sentence terminator.
+        start = 0
+        for m in _SENT_END.finditer(text, 0, pos):
+            start = m.end()
+        # Walk forwards to the next.
+        m = _SENT_END.search(text, pos)
+        end = m.start() + 1 if m else len(text)
+        return start, end
+
+    def _fn(text: str):
+        clean = re.sub(r"\*\*([^*]+)\*\*", r"\1", text)
+        for m in _NUM_RE.finditer(clean):
+            n = m.group(0)
+            if n in _TRIVIAL_NUMS:
+                continue
+            s, e = _sentence_span(clean, m.start())
+            if not _CITE_RE.search(clean[s:e]):
+                return False
+        return True
+    return _fn
+
+
+def _failing_sentences_for_citations(text: str) -> list[str]:
+    """Return the sentences in `text` that contain a non-trivial number
+    but no [doc_id] citation. Used to give the model targeted reroll
+    feedback so it can fix the exact spots that failed."""
+    clean = re.sub(r"\*\*([^*]+)\*\*", r"\1", text)
+    sents = re.split(r"\.[\s)]|\.$", clean)
+    bad = []
+    for s in sents:
+        nums = [n for n in _NUM_RE.findall(s) if n not in _TRIVIAL_NUMS]
+        if nums and not _CITE_RE.search(s):
+            bad.append(s)
+    return bad
+
+
+def _check_referenced_doc_ids_exist(doc_msgs: list[dict]):
+    valid = _doc_ids(doc_msgs)
+    def _fn(text: str):
+        cited = {m.group("id") for m in _CITE_RE.finditer(text)}
+        rogue = cited - valid
+        return not rogue
+    return _fn
+
+
+# --- main entry point ------------------------------------------------------
+
+
+def reconcile_strict(doc_msgs: list[dict],
+                     system_prompt: str,
+                     user_prompt: str = "Write the cited briefing now.",
+                     model: str | None = None,
+                     loop_budget: int = DEFAULT_LOOP_BUDGET,
+                     ollama_options: dict | None = None) -> dict[str, Any]:
+    """Run Granite reconciliation with Mellea rejection sampling.
+
+    Returns a dict with:
+      paragraph            — final validated text
+      rerolls              — number of resamples (0 = passed first try)
+      requirements_passed  — list of requirement names that passed in the
+                             accepted sample
+      requirements_failed  — list of requirement names that failed
+                             (empty on accepted sample)
+      elapsed_s            — total seconds including rerolls
+      model                — model id used
+      loop_budget          — configured budget
+    """
+    model = model or DEFAULT_MODEL
+    t0 = time.time()
+
+    # Per-requirement closures wired with the doc context.
+    # Keep the validator functions in our own table so we can re-run them
+    # on the final paragraph to produce reliable pass/fail metadata for
+    # the report — Mellea's internal validation-result objects vary by
+    # version and aren't great for downstream display.
+    checks = [
+        ("numerics_grounded",
+         "All numbers in the output must appear verbatim in the source documents.",
+         _check_no_invented_numbers(doc_msgs)),
+        ("no_placeholder_tokens",
+         "The output must not contain placeholder tokens like [source] or raw markup.",
+         _check_no_placeholder_tokens()),
+        ("citations_dense",
+         "Every numeric claim must have a [doc_id] citation in the same sentence.",
+         _check_every_claim_cited()),
+        ("citations_resolve",
+         "Every cited [doc_id] must correspond to a real source document.",
+         _check_referenced_doc_ids_exist(doc_msgs)),
+    ]
+    requirements = [
+        req(desc, validation_fn=simple_validate(fn, reason=name))
+        for name, desc, fn in checks
+    ]
+
+    session = start_session(backend_name="ollama", model_id=model,
+                            model_options=ollama_options or {})
+    try:
+        # Build the prompt: system + serialized doc context + user task.
+        # Mellea's instruct() takes the whole instruction; we serialize
+        # the doc messages into the description so the haystack is
+        # available to the model the same way it would be via
+        # ollama.chat with role="document <doc_id>" messages.
+        # (The tag wrapper below is a reconstruction; the original markup
+        # was stripped from this file in transit. What matters is that
+        # each block carries its doc_id so citations can resolve.)
+        doc_block = "\n\n".join(
+            f"<document id=\"{m.get('role', '').split(' ', 1)[1]}\">\n"
+            f"{m['content']}\n"
+            f"</document>"
+            for m in doc_msgs
+        )
+        instruction = (
+            f"{system_prompt}\n\n"
+            f"DOCUMENTS:\n{doc_block}\n\n"
+            f"TASK: {user_prompt}"
+        )
+
+        result = session.instruct(
+            description=instruction,
+            strategy=RejectionSamplingStrategy(
+                loop_budget=loop_budget,
+                requirements=requirements,
+            ),
+            requirements=requirements,
+            return_sampling_results=True,
+            model_options={"temperature": 0,
+                           "num_ctx": int(os.environ.get("RIPRAP_MELLEA_NUM_CTX", "4096")),
+                           "num_predict": int(os.environ.get("RIPRAP_MELLEA_NUM_PREDICT", "600")),
+                           **(ollama_options or {})},
+        )
+
+        paragraph = _extract_text(result).strip()
+        n_attempts = _extract_attempts(result)
+        rerolls = max(0, n_attempts - 1)
+    finally:
+        try:
+            session.cleanup()
+        except Exception:
+            pass
+
+    # Re-run our own checks on the final paragraph for clean pass/fail
+    # metadata. This is what shows up in the report's compliance section.
+    passed: list[str] = []
+    failed: list[str] = []
+    for name, _desc, fn in checks:
+        try:
+            if fn(paragraph):
+                passed.append(name)
+            else:
+                failed.append(name)
+        except Exception as e:
+            log.warning("requirement %s raised: %r", name, e)
+            failed.append(name)
+
+    return {
+        "paragraph": paragraph,
+        "rerolls": rerolls,
+        "n_attempts": n_attempts,
+        "requirements_total": len(checks),
+        "requirements_passed": passed,
+        "requirements_failed": failed,
+        "elapsed_s": round(time.time() - t0, 2),
+        "model": model,
+        "loop_budget": loop_budget,
+    }
+
+
+def reconcile_strict_streaming(
+    doc_msgs: list[dict],
+    system_prompt: str,
+    user_prompt: str = "Write the cited briefing now.",
+    model: str | None = None,
+    loop_budget: int = DEFAULT_LOOP_BUDGET,
+    ollama_options: dict | None = None,
+    on_token=None,
+    on_attempt_end=None,
+) -> dict[str, Any]:
+    """Hand-rolled rejection sampler that *streams* each attempt to the
+    user instead of waiting silently for Mellea to validate behind the
+    scenes. Same compliance contract as reconcile_strict — runs the
+    same four checks, accepts the first attempt that passes, falls back
+    to the last attempt if the budget is exhausted.
+
+    Callbacks (both optional, both fire on the calling thread):
+      on_token(delta: str, attempt_idx: int)
+          — fires for every token chunk as it arrives from Granite.
+      on_attempt_end(attempt_idx: int, passed: list[str], failed: list[str])
+          — fires after each attempt with its per-requirement outcome.
+            The frontend uses this to render reroll banners + reset the
+            briefing buffer when a new attempt begins.
+    """
+    model = model or DEFAULT_MODEL
+    t0 = time.time()
+
+    checks = [
+        ("numerics_grounded",
+         _check_no_invented_numbers(doc_msgs)),
+        ("no_placeholder_tokens",
+         _check_no_placeholder_tokens()),
+        ("citations_dense",
+         _check_every_claim_cited()),
+        ("citations_resolve",
+         _check_referenced_doc_ids_exist(doc_msgs)),
+    ]
+
+    base_messages = doc_msgs + [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": user_prompt},
+    ]
+    # num_ctx 4096 fits a typical trimmed prompt (≈700 system + ≈2500 docs);
+    # num_predict 600 comfortably caps the 4-section briefing, which runs
+    # ≈300-350 tokens in practice. With RIPRAP_TRIM_DOCS=1 and the planner
+    # picking 6-9 specialists, the 4096 window has been sufficient on every
+    # probe; the previous 6144/600 was sized for the *untrimmed* fan-out and
+    # was forcing Ollama to grow the KV cache (33% more memory + a full
+    # re-init) every Mellea attempt.
+    # Override with RIPRAP_MELLEA_NUM_CTX / RIPRAP_MELLEA_NUM_PREDICT.
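+    # Back-of-envelope fit with the figures above: ≈700 (system) +
+    # ≈2500 (docs) + 600 (generation budget) ≈ 3800 tokens < 4096.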
+ base_opts = {"temperature": 0, + "num_ctx": int(os.environ.get("RIPRAP_MELLEA_NUM_CTX", "4096")), + "num_predict": int(os.environ.get("RIPRAP_MELLEA_NUM_PREDICT", "600")), + **(ollama_options or {})} + + paragraph = "" + last_passed: list[str] = [] + last_failed: list[str] = [name for name, _ in checks] + last_paragraph = "" + attempts = 0 + + for attempt_idx in range(loop_budget): + attempts = attempt_idx + 1 + # On reroll, append a tight feedback message naming what failed AND + # the specific failing sentences (so the model knows exactly which + # ones to fix). Granite responds well to surgical corrections. + messages = list(base_messages) + if attempt_idx > 0 and last_failed: + feedback = [ + f"Your previous draft failed: {', '.join(last_failed)}.", + ] + if "citations_dense" in last_failed and last_paragraph: + bad = _failing_sentences_for_citations(last_paragraph) + if bad: + feedback.append( + "Specific sentences with uncited numbers:" + ) + for s in bad[:3]: + feedback.append(f" - {s.strip()}") + feedback.append( + "Add a [doc_id] citation at the end of each. " + "Re-emit the FULL briefing." + ) + else: + feedback.append( + "Re-write so every sentence containing a number ends " + "with a [doc_id] citation." + ) + messages.append({"role": "user", "content": "\n".join(feedback)}) + + chunks: list[str] = [] + for chunk in llm.chat(model=model, messages=messages, + stream=True, options=base_opts): + delta = (chunk.get("message") or {}).get("content") or "" + if delta: + chunks.append(delta) + if on_token is not None: + try: + on_token(delta, attempt_idx) + except Exception: + log.exception("on_token callback raised") + paragraph = "".join(chunks).strip() + + passed: list[str] = [] + failed: list[str] = [] + for name, fn in checks: + try: + (passed if fn(paragraph) else failed).append(name) + except Exception as e: + log.warning("requirement %s raised: %r", name, e) + failed.append(name) + + last_passed = passed + last_failed = failed + last_paragraph = paragraph + if on_attempt_end is not None: + try: + on_attempt_end(attempt_idx, passed, failed) + except Exception: + log.exception("on_attempt_end callback raised") + + if not failed: + break + + return { + "paragraph": paragraph, + "rerolls": max(0, attempts - 1), + "n_attempts": attempts, + "requirements_total": len(checks), + "requirements_passed": last_passed, + "requirements_failed": last_failed, + "elapsed_s": round(time.time() - t0, 2), + "model": model, + "loop_budget": loop_budget, + } + + +def _extract_text(result) -> str: + """SamplingResult / ModelOutputThunk text extraction.""" + for attr in ("sample", "result", "value", "content"): + v = getattr(result, attr, None) + if v is not None: + if hasattr(v, "value"): + return str(v.value) + return str(v) + return str(result) + + +def _extract_attempts(result) -> int: + """How many samples were drawn before stopping.""" + for attr in ("n_attempts", "num_attempts", "attempts"): + v = getattr(result, attr, None) + if isinstance(v, int): + return v + samples = getattr(result, "sample_validations", None) or getattr(result, "samples", None) + if isinstance(samples, list): + return len(samples) + return 1 + + +def _extract_pass_fail(result) -> tuple[list[str], list[str]]: + """Best-effort extraction of which requirements passed on the + accepted sample. mellea v0.4 exposes sample_validations as a list + where each entry is itself a list of (Requirement, ValidationResult) + tuples — duck-type defensively. 
+ """ + validations = getattr(result, "sample_validations", None) + if not validations: + return [], [] + last = validations[-1] if isinstance(validations, list) else validations + passed: list[str] = [] + failed: list[str] = [] + items = last if isinstance(last, list) else [last] + for item in items: + # Item might be (Requirement, ValidationResult) tuple, or a single + # ValidationResult, or a Requirement, depending on mellea version. + ok = None + descr = "" + if isinstance(item, tuple) and len(item) >= 2: + descr = str(item[0])[:80] + v = item[1] + ok = bool(getattr(v, "passed", getattr(v, "is_valid", + getattr(v, "result", False)))) + else: + descr = str(getattr(item, "requirement", item))[:80] + ok = bool(getattr(item, "passed", getattr(item, "is_valid", + getattr(item, "result", False)))) + if ok: + passed.append(descr) + else: + failed.append(descr) + return passed, failed diff --git a/app/planner.py b/app/planner.py new file mode 100644 index 0000000000000000000000000000000000000000..f91c6ea6b0f81e5f16acd2a58d052a0f129a011b --- /dev/null +++ b/app/planner.py @@ -0,0 +1,346 @@ +"""Riprap query planner — Granite 4.1 routes a natural-language query +to one of several intents and selects which specialists to invoke. + +This is the agentic kernel: instead of running every specialist on +every query, the planner reads the query and emits a structured plan. +The executor then runs only the relevant specialists, in parallel +where dependencies permit. + +Output is a single JSON object with a fixed schema (see PLAN_SCHEMA). +We use Ollama's `format='json'` constrained-decoding mode so Granite +4.1 cannot emit malformed structure. A deterministic post-validator +sanity-checks the plan against the supported intents and specialists. +""" +from __future__ import annotations + +import json +import logging +import os +import re +from dataclasses import dataclass +from typing import Any + +from app import llm + +log = logging.getLogger("riprap.planner") + +# Routing is a small structured-output task; speed wins over depth here. +# Pin to the 3b variant explicitly — even if a deployment pulls 8b for +# reconciliation, the planner stays small to keep TTFB low. +OLLAMA_MODEL = os.environ.get("RIPRAP_PLANNER_MODEL", + os.environ.get("RIPRAP_OLLAMA_MODEL", "granite4.1:3b")) + +# ---- Plan schema ----------------------------------------------------------- +# +# The set of intents Riprap currently supports. Every plan picks exactly +# one; the executor maps intent → action graph in app/intents/. + +INTENTS = { + "single_address": ( + "Use when the query refers to a SPECIFIC LOCATABLE POINT — either " + "(a) a street address with house number + street name (e.g. " + "'116-50 Sutphin Blvd', '350 5th Ave Manhattan'), or (b) a named " + "development, complex, or housing project that geocodes to a single " + "location (e.g. 'Coney Island I Houses', 'Carleton Manor Houses', " + "'Vladeck Houses'). If the query names only a general neighborhood " + "or borough with no specific address or named building, use " + "'neighborhood'." + ), + "neighborhood": ( + "Use when the query names a NEIGHBORHOOD or BOROUGH with no " + "specific street address (e.g. 'Brighton Beach', 'Carroll " + "Gardens', 'Brooklyn', 'is Red Hook at risk?', 'show me Hollis " + "flooding'). Skip geocoding; resolve to NTA polygon(s) and run " + "polygon-level specialists." + ), + "live_now": ( + "User asked about CURRENT CONDITIONS in NYC (e.g. 'is there " + "flooding right now', 'what's the surge tonight'). 
Skip historic "
+        "and modeled specialists; focus on live-data specialists."
+    ),
+    "development_check": (
+        "User asked about CURRENT/IN-PROGRESS CONSTRUCTION OR DEVELOPMENT "
+        "in a place, with implicit interest in flood risk for those projects "
+        "(e.g. 'what are they building in Gowanus and is it risky?', "
+        "'show me new construction in flood zones', 'are there projects "
+        "underway in Red Hook?'). Resolve target to NTA polygon, pull active "
+        "DOB construction permits inside it, cross-reference each project "
+        "with Sandy + DEP flood layers, return a flagged-projects list."
+    ),
+    "compare": (
+        "Use ONLY when the query explicitly compares TWO specific street "
+        "ADDRESSES (e.g. 'compare 80 Pioneer St Brooklyn to 100 Gold St "
+        "Manhattan', 'which is riskier: X or Y?', 'X vs Y flood risk'). "
+        "Extract BOTH full street addresses into targets as two separate "
+        "{type: 'address', text: ...} objects. Run the full single-address "
+        "specialist suite for each."
+    ),
+}
+
+SPECIALISTS = {
+    # name: (description, which intents may invoke it)
+    "geocode":        ("Resolve address text to lat/lon via NYC DCP Geosearch.", ["single_address", "compare"]),
+    "nta_resolve":    ("Resolve a neighborhood or borough name to NTA polygon(s).", ["neighborhood"]),
+    "sandy":          ("2012 Sandy inundation extent (point-in-polygon or % of NTA).", ["single_address", "neighborhood", "compare"]),
+    "dep_stormwater": ("DEP Stormwater Maps — 3 modeled scenarios.", ["single_address", "neighborhood", "compare"]),
+    "floodnet":       ("Live FloodNet ultrasonic sensors + trigger history.", ["single_address", "neighborhood", "live_now", "compare"]),
+    "nyc311":         ("NYC 311 flood-related complaints in buffer or polygon.", ["single_address", "neighborhood", "compare"]),
+    "noaa_tides":     ("Live NOAA Battery / Kings Pt / Sandy Hook water level.", ["single_address", "neighborhood", "live_now", "compare"]),
+    "nws_alerts":     ("Live NWS active flood-relevant alerts at point.", ["single_address", "neighborhood", "live_now", "compare"]),
+    "npcc4_slr":      ("NPCC4 (2024) sea-level rise projections at the Battery — 2050/2100 low/mid/high/extreme.", ["single_address", "neighborhood", "compare"]),
+    "nws_obs":        ("Live NWS hourly precip from nearest ASOS station.", ["single_address", "neighborhood", "live_now", "compare"]),
+    "ttm_forecast":   ("Granite TTM r2 surge-residual nowcast at the nearest NOAA gauge (Battery / Kings Pt / Sandy Hook).", ["single_address", "neighborhood", "live_now", "compare"]),
+    "microtopo":      ("LiDAR-derived terrain (HAND, TWI, percentile) at point or aggregated over polygon.", ["single_address", "neighborhood", "compare"]),
+    "ida_hwm":        ("USGS Hurricane Ida 2021 high-water marks proximity.", ["single_address", "neighborhood", "compare"]),
+    "prithvi":        ("Prithvi-EO 2.0 Hurricane Ida 2021 satellite flood polygons.", ["single_address", "neighborhood", "compare"]),
+    "rag":            ("Retrieve relevant agency-report passages over the policy corpus.", ["single_address", "neighborhood", "development_check", "compare"]),
+    "dob_permits":    ("Active NYC DOB construction permits inside a polygon, each cross-referenced with Sandy + DEP flood scenarios. Use for 'what are they building' / 'projects in progress' queries.", ["development_check"]),
+}
+
+
+@dataclass
+class Plan:
+    intent: str
+    targets: list[dict[str, str]]
+    specialists: list[str]
+    rationale: str
+
+
+PLAN_SCHEMA_DESC = """The output JSON must have exactly these keys:
+
+{
+  "intent": one of [single_address, neighborhood, live_now, development_check, compare],
+  "targets": [
+    // one or more target objects, each with:
+    // {"type": "address", "text": "<full street address>"} when intent=single_address
+    // {"type": "nta", "text": "<neighborhood name>"} when intent=neighborhood
+    // {"type": "borough", "text": "<borough name>"} when intent=neighborhood (boro-wide)
+    // {"type": "nyc", "text": "NYC"} when intent=live_now (no specific place)
+  ],
+  "specialists": [list of specialist names from the SPECIALISTS catalog the executor should run],
+  "rationale": "<one short sentence>"
+}
+
+Hard rules:
+- Pick ONE intent only.
+- Specialists must be drawn from the catalog and must be applicable to the chosen intent.
+- For intent=single_address: ALWAYS include "geocode". Typically include all static + live specialists.
+- For intent=neighborhood: ALWAYS include "nta_resolve". Skip "geocode". Include polygon-capable specialists.
+- For intent=live_now: ONLY live specialists. Skip historic/modeled (sandy, dep_*, ida_hwm, prithvi).
+- For intent=development_check: ALWAYS include "nta_resolve" AND "dob_permits". Sandy + DEP are also useful so the model can compare project locations to flood layers.
+- For intent=compare: ALWAYS include "geocode". Extract BOTH street addresses into targets — the executor runs the full specialist suite once per address. Targets must be exactly 2 items, both type="address".
+- IMPORTANT — TARGETS: extract neighborhood/borough names directly from the query text. If the query says "in Gowanus", "what about Brighton Beach", "around Carroll Gardens", etc., the target MUST be {"type": "nta", "text": "<that neighborhood name>"}. Use {"type": "nyc"} ONLY when the query mentions NYC as a whole and no specific place. Failing to extract a place name will cause the executor to give up — be explicit.
+- "targets" is a list because the user may name multiple places (e.g. "compare Brighton Beach and Coney Island").
+- "rationale" is one short sentence — what your reasoning was.
+"""
+
+
+SYSTEM_PROMPT = f"""You are Riprap's query planner. You read a user's natural-language flood-risk query and emit a structured execution plan.
+
+You do NOT have access to any data. You only decide which intent fits the query and which specialists are relevant. Another component (the executor) will run the specialists.
+
+Available intents:
+{chr(10).join(f"  - {k}: {v}" for k, v in INTENTS.items())}
+
+Available specialists (and which intents they apply to):
+{chr(10).join(f"  - {name}: {desc} (intents: {', '.join(intents)})" for name, (desc, intents) in SPECIALISTS.items())}
+
+{PLAN_SCHEMA_DESC}
+
+Output ONLY the JSON object. No commentary, no markdown. The "rationale" field MUST be ONE SHORT SENTENCE (under 20 words). Stop immediately after the closing brace."""
+
+
+# ---- Not-implemented short-circuits ----------------------------------------
+#
+# These patterns are well-defined feature gaps. Returning a graceful message
+# is better than routing them into an intent that silently fails.
+ +_RETROSPECTIVE_RE = re.compile( + r"(?:what\s+would\s+(?:riprap|you|it)\s+have\s+said" + r"|what\s+(?:was|were)\s+(?:the\s+)?(?:flood|risk|status)" + r"|(?:as\s+of|on)\s+(?:august|september|october|november|december|january|" + r"february|march|april|may|june|july)\s+\d" + r"|on\s+(?:the\s+date\s+of|hurricane\s+ida|hurricane\s+sandy)" + r"|(?:september|august|october)\s+\d{1,2},?\s+20\d{2}" + r")", + re.IGNORECASE, +) + +_RANKING_RE = re.compile( + r"(?:rank\s+(?:the\s+)?top\s+\d" + r"|top\s+\d+\s+\w+\s+by\s+flood" + r"|intersect(?:ed)?\s+with\s+(?:dac|ejnyc|social\s+vulnerability)" + r"|sort(?:ed)?\s+by\s+(?:flood\s+)?(?:exposure|risk|score)" + r")", + re.IGNORECASE, +) + +NOT_IMPLEMENTED_INTENTS = { + "retrospective": ( + _RETROSPECTIVE_RE, + "Historical-date mode (\"what would Riprap have said on [date]\") " + "is on the roadmap but not yet available. Riprap currently reports " + "present-state flood exposure; past-state reconstruction is planned " + "for a future release (see deck slide 8).", + ), + "ranking": ( + _RANKING_RE, + "Cross-development ranking queries (\"rank top N by flood exposure\", " + "\"intersect with DAC designation\") require a cross-register join " + "that is on the roadmap but not yet available. Try a specific address " + "or neighborhood instead.", + ), +} + + +def _not_implemented_message(query: str) -> str | None: + """Return a user-facing message if the query matches a known feature gap, + else None.""" + for _name, (pattern, message) in NOT_IMPLEMENTED_INTENTS.items(): + if pattern.search(query): + return message + return None + + +# ---- Planner call ---------------------------------------------------------- + +def plan(query: str, model: str = OLLAMA_MODEL, on_token=None) -> Plan: + """Ask Granite 4.1 to plan a query. Returns a validated Plan. + + If on_token is provided, the planner runs in streaming mode and + on_token(delta) is called for each chunk of the JSON output as + Granite generates. The streaming endpoint uses this to show the + agent's reasoning forming live in the UI. + """ + msg = _not_implemented_message(query) + if msg: + log.info("planner: short-circuit not_implemented for query %r", query[:80]) + if on_token: + on_token(json.dumps({"intent": "not_implemented", "message": msg})) + return Plan(intent="not_implemented", targets=[], + specialists=[], rationale=msg) + + messages = [ + {"role": "system", "content": SYSTEM_PROMPT}, + {"role": "user", "content": query}, + ] + # Cap output — the plan JSON is tiny; an uncapped model can spin + # forever in the rationale field and exhaust the stream timeout. + _opts = {"temperature": 0, "num_predict": 512} + if on_token is None: + resp = llm.chat(model=model, messages=messages, + format="json", options=_opts) + raw = resp["message"]["content"].strip() + else: + chunks: list[str] = [] + for chunk in llm.chat(model=model, messages=messages, + format="json", stream=True, + options=_opts): + delta = (chunk.get("message") or {}).get("content") or "" + if delta: + chunks.append(delta) + on_token(delta) + raw = "".join(chunks).strip() + log.info("planner raw: %s", raw[:400]) + try: + d = json.loads(raw) + except json.JSONDecodeError: + # Model hit num_predict ceiling mid-JSON — try salvaging with a + # truncated-JSON repair: strip to last valid closing brace. 
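+        # This only salvages "valid JSON + trailing junk". A cut that
+        # lands mid-object leaves no balanced top-level brace, so the
+        # retry below still fails and we raise with the raw prefix.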
+ trimmed = raw + for i in range(len(raw) - 1, -1, -1): + if raw[i] == "}": + trimmed = raw[:i + 1] + break + try: + d = json.loads(trimmed) + log.warning("planner JSON repaired by trimming to last '}'") + except json.JSONDecodeError as e2: + raise ValueError(f"planner emitted non-JSON: {raw[:200]!r}") from e2 + return _validate(d, raw_query=query) + + +def _validate(d: dict[str, Any], raw_query: str) -> Plan: # TODO(cleanup): cc-grade-D (23) + """Defensive parse + sanitize. The model might pick an invalid intent + or a specialist that isn't applicable; fall back to single_address + with the raw query as the address (the most common case).""" + intent = d.get("intent") + if intent not in INTENTS: + log.warning("planner picked invalid intent %r; defaulting to single_address", intent) + intent = "single_address" + + raw_targets = d.get("targets") or [] + targets: list[dict[str, str]] = [] + for t in raw_targets: + if not isinstance(t, dict): + continue + t_type = t.get("type") + t_text = (t.get("text") or "").strip() + if not t_text or t_type not in ("address", "nta", "borough", "nyc"): + continue + targets.append({"type": t_type, "text": t_text}) + if not targets: + # Reasonable fallback: assume the raw query IS the target + if intent == "single_address": + targets = [{"type": "address", "text": raw_query}] + elif intent == "neighborhood": + targets = [{"type": "nta", "text": raw_query}] + elif intent == "compare": + # Planner failed to extract two addresses — treat whole query as + # single address so the caller gets at least one result rather + # than a confusing empty response. + log.warning("compare intent but no valid targets extracted; " + "falling back to single raw query") + targets = [{"type": "address", "text": raw_query}] + else: + targets = [{"type": "nyc", "text": "NYC"}] + + raw_specialists = d.get("specialists") or [] + specialists: list[str] = [] + for s in raw_specialists: + if isinstance(s, str) and s in SPECIALISTS: + _, applicable = SPECIALISTS[s] + if intent in applicable: + specialists.append(s) + # Enforce a floor: each intent has canonical specialists that should + # always run. The planner picks ADDITIONS; we ensure the minimum. + required = _required_specialists(intent) + added = [s for s in required if s not in specialists] + if added: + log.info("planner missed required %s for intent=%s; adding", added, intent) + specialists = list(dict.fromkeys(specialists + required)) + if not specialists: + specialists = _default_specialists(intent) + + rationale = (d.get("rationale") or "").strip()[:300] or "(no rationale provided)" + return Plan(intent=intent, targets=targets, specialists=specialists, rationale=rationale) + + +def _required_specialists(intent: str) -> list[str]: + """Floor: specialists that are ALWAYS run for an intent regardless of + what the planner emitted. 
Captures load-bearing signals the planner + sometimes forgets (sandy / dep for neighborhood; geocode for address).""" + if intent == "single_address": + return ["geocode", "sandy", "dep_stormwater", "microtopo"] + if intent == "neighborhood": + return ["nta_resolve", "sandy", "dep_stormwater", "nyc311"] + if intent == "live_now": + return ["nws_alerts", "noaa_tides"] + if intent == "development_check": + return ["nta_resolve", "dob_permits", "sandy", "dep_stormwater"] + if intent == "compare": + return ["geocode", "sandy", "dep_stormwater", "microtopo"] + return [] + + +def _default_specialists(intent: str) -> list[str]: + if intent in ("single_address", "compare"): + return ["geocode", "sandy", "dep_stormwater", "floodnet", "nyc311", + "noaa_tides", "nws_alerts", "nws_obs", "ttm_forecast", + "microtopo", "ida_hwm", "prithvi", "rag"] + if intent == "neighborhood": + return ["nta_resolve", "sandy", "dep_stormwater", "nyc311", + "microtopo", "rag"] + if intent == "live_now": + return ["noaa_tides", "nws_alerts", "nws_obs", "ttm_forecast", "floodnet"] + return [] diff --git a/app/rag.py b/app/rag.py new file mode 100644 index 0000000000000000000000000000000000000000..ced90c4e837f332b834247a7c045238ee03e1b20 --- /dev/null +++ b/app/rag.py @@ -0,0 +1,289 @@ +"""Granite Embedding 278M RAG over the NYC flood-resilience policy corpus. + +Specialists this powers: + step_rag — for any query (geo + intent), retrieve top-k relevant + policy paragraphs from DEP/NYCHA/Con Edison/MTA/Comptroller + and emit them as document blocks. + +We chunk page-by-page with a soft target of ~700 chars per chunk, embed +once at startup, and keep the normalized embedding matrix in memory; +retrieval is a brute-force inner-product scan over the corpus, which is +small enough (~1k chunks across 5 PDFs) to need no ANN index. +""" +from __future__ import annotations + +import logging +import os +import re +from dataclasses import dataclass +from pathlib import Path + +import numpy as np + +log = logging.getLogger("riprap.rag") + +CORPUS_DIR = Path(__file__).resolve().parent.parent / "corpus" +EMBED_MODEL_NAME = "ibm-granite/granite-embedding-278m-multilingual" + +CORPUS_META = { + "dep_wastewater_2013.pdf": { + "doc_id": "rag_dep_2013", + "title": "NYC DEP Wastewater Resiliency Plan (2013)", + "citation": "NYC DEP Wastewater Resiliency Plan, 2013", + }, + "nycha_lessons.pdf": { + "doc_id": "rag_nycha", + "title": "Flood Resilience at NYCHA — Lessons Learned", + "citation": "NYCHA, Flood Resilience: Lessons Learned", + }, + "coned_22_e_0222.pdf": { + "doc_id": "rag_coned", + "title": "Con Edison Climate Change Resilience Plan (2023, Case 22-E-0222)", + "citation": "Con Edison Climate Change Resilience Plan (2023, NY PSC Case 22-E-0222)", + }, + "mta_resilience_2025.pdf": { + "doc_id": "rag_mta", + "title": "MTA Climate Resilience Roadmap (October 2025 update)", + "citation": "MTA Climate Resilience Roadmap, October 2025 update", + }, + "comptroller_rain_2024.pdf": { + "doc_id": "rag_comptroller", + "title": "NYC Comptroller — Is NYC Ready for Rain? 
(2024)", + "citation": "NYC Comptroller, \"Is New York City Ready for Rain?\" (2024)", + }, +} + + +@dataclass +class Chunk: + text: str + file: str + page: int + doc_id: str + title: str + citation: str + + +def _chunks_from_pdf(path: Path, target_chars: int = 700) -> list[Chunk]: + import pypdf + meta = CORPUS_META.get(path.name, { + "doc_id": f"rag_{path.stem}", + "title": path.stem, + "citation": path.stem, + }) + out: list[Chunk] = [] + try: + reader = pypdf.PdfReader(str(path)) + except Exception as e: + log.warning("pdf load failed for %s: %s", path.name, e) + return out + for i, page in enumerate(reader.pages): + try: + txt = page.extract_text() or "" + except Exception: + txt = "" + txt = re.sub(r"\s+", " ", txt).strip() + if len(txt) < 80: + continue + # split into ~target_chars chunks at sentence boundaries + sentences = re.split(r"(?<=[.!?])\s+", txt) + buf = "" + for s in sentences: + if len(buf) + len(s) + 1 <= target_chars or not buf: + buf = (buf + " " + s).strip() if buf else s + else: + out.append(Chunk(text=buf, file=path.name, page=i + 1, + doc_id=meta["doc_id"], title=meta["title"], + citation=meta["citation"])) + buf = s + if buf: + out.append(Chunk(text=buf, file=path.name, page=i + 1, + doc_id=meta["doc_id"], title=meta["title"], + citation=meta["citation"])) + return out + + +_INDEX: dict | None = None +_RERANKER = None # lazy CrossEncoder + +# Reranker switch: when "1", retrieve() over-fetches K*5 candidates without +# the per-doc dedup, scores them via the Granite Embedding Reranker R2 +# cross-encoder, then dedups to K. Falls back to the baseline ranker when +# disabled. See experiments/03_granite_reranker/RESULTS.md for the +# reasoning behind inverting dedup vs rerank. +_RERANKER_ENABLE = os.environ.get("RIPRAP_RERANKER_ENABLE", "").lower() in ("1", "true", "yes") +_RERANKER_MODEL_NAME = os.environ.get( + "RIPRAP_RERANKER_MODEL", + "ibm-granite/granite-embedding-reranker-english-r2", +) + + +def _ensure_index(): + global _INDEX + if _INDEX is not None: + return _INDEX + + chunks: list[Chunk] = [] + for f in sorted(CORPUS_DIR.glob("*.pdf")): + log.info("rag: chunking %s", f.name) + chunks.extend(_chunks_from_pdf(f)) + log.info("rag: %d chunks across %d files", + len(chunks), len(set(c.file for c in chunks))) + if not chunks: + _INDEX = {"chunks": [], "embs": None, "model": None} + return _INDEX + + texts = [c.text for c in chunks] + log.info("rag: embedding %d chunks", len(texts)) + + # v0.4.5 — try the MI300X service first. Avoids loading + # sentence-transformers + the granite-embedding weights on a + # cpu-basic surface (HF Space). Falls back to local on + # RemoteUnreachable so dev laptops keep working with no env. + embs = None + model = None + try: + from app import inference as _inf + if _inf.remote_enabled(): + log.info("rag: encoding via remote MI300X") + remote = _inf.granite_embed(texts, timeout=120.0) + if remote.get("ok"): + embs = np.asarray(remote["vectors"], dtype="float32") + # Per-query encodes will also route through remote; + # `model` stays None and `retrieve()` checks for it. 
+ except _inf.RemoteUnreachable as e: + log.info("rag: remote unreachable (%s); local fallback", e) + except Exception: + log.exception("rag: remote encode failed; local fallback") + + if embs is None: + from sentence_transformers import SentenceTransformer + log.info("rag: loading %s (local fallback)", EMBED_MODEL_NAME) + model = SentenceTransformer(EMBED_MODEL_NAME) + embs = model.encode(texts, batch_size=32, show_progress_bar=False, + convert_to_numpy=True, normalize_embeddings=True) + embs = embs.astype("float32") + + _INDEX = {"chunks": chunks, "embs": embs, "model": model} + log.info("rag: index ready (%s)", embs.shape) + return _INDEX + + +def _ensure_reranker(): + """Lazy-load the cross-encoder. Returns None if disabled or load fails; + callers fall back to the baseline ranker silently.""" + global _RERANKER + if not _RERANKER_ENABLE: + return None + if _RERANKER is not None: + return _RERANKER + try: + from sentence_transformers import CrossEncoder + log.info("rag: loading reranker %s", _RERANKER_MODEL_NAME) + _RERANKER = CrossEncoder(_RERANKER_MODEL_NAME) + log.info("rag: reranker ready") + except Exception: + log.exception("rag: reranker load failed; falling back to baseline") + _RERANKER = False # sentinel: don't retry every call + return _RERANKER or None + + +def warm(): + _ensure_index() + _ensure_reranker() + + +def retrieve(query: str, k: int = 4, min_score: float = 0.30) -> list[dict]: + idx = _ensure_index() + if idx["embs"] is None or not idx["chunks"]: + return [] + + # v0.4.5 — encode query via remote when corpus was embedded remotely. + # `_ensure_index` leaves `model = None` when it took the remote + # path, so this branch handles both: + # - model present → local SentenceTransformer.encode (fast, in-mem) + # - model is None → POST to MI300X, fallback to a one-shot local + # SentenceTransformer load if remote is down. + if idx["model"] is not None: + qv = idx["model"].encode([query], convert_to_numpy=True, + normalize_embeddings=True).astype("float32") + else: + qv = None + try: + from app import inference as _inf + if _inf.remote_enabled(): + remote = _inf.granite_embed([query]) + if remote.get("ok"): + qv = np.asarray(remote["vectors"], dtype="float32") + except _inf.RemoteUnreachable as e: + log.info("rag: per-query encode remote unreachable (%s)", e) + if qv is None: + from sentence_transformers import SentenceTransformer + log.info("rag: cold-loading %s for per-query encode (remote down)", + EMBED_MODEL_NAME) + local = SentenceTransformer(EMBED_MODEL_NAME) + qv = local.encode([query], convert_to_numpy=True, + normalize_embeddings=True).astype("float32") + # Cache so subsequent queries don't re-load + idx["model"] = local + sims = (idx["embs"] @ qv.T).ravel() + + reranker = _ensure_reranker() + if reranker is not None: + # Over-fetch K*5 candidates (no per-doc dedup yet), rerank, then + # dedup to K. This keeps high-relevance chunks alive long enough + # for the cross-encoder to see them — the legacy path's + # dedup-before-rank threw them away. 
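+        # Worked example (hypothetical scores): with k=4 we over-fetch
+        # 20 candidates. Say the top cosine hits are three
+        # rag_comptroller chunks (0.62, 0.61, 0.58) and then one
+        # rag_mta chunk (0.55). Dedup-first would discard the 2nd and
+        # 3rd comptroller chunks before the cross-encoder ever scored
+        # them; rerank-first lets the cross-encoder promote whichever
+        # chunk actually answers the query, and the per-doc dedup
+        # below then keeps that one.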
+ cand_n = min(len(idx["chunks"]), max(k * 5, 20)) + top_idx = np.argsort(-sims)[:cand_n] + candidates = [(int(i), idx["chunks"][int(i)], + float(sims[int(i)])) for i in top_idx + if float(sims[int(i)]) >= min_score] + if not candidates: + return [] + pairs = [[query, c.text] for _, c, _ in candidates] + scores = reranker.predict(pairs) + ranked = sorted(zip(candidates, scores, strict=True), + key=lambda x: float(x[1]), reverse=True) + out: list[dict] = [] + seen_per_doc: dict[str, int] = {} + for (_i, c, retr_score), rerank_score in ranked: + if seen_per_doc.get(c.doc_id, 0) >= 1: + continue + seen_per_doc[c.doc_id] = 1 + out.append({ + "doc_id": c.doc_id, + "title": c.title, + "citation": c.citation, + "file": c.file, + "page": c.page, + "text": c.text, + "score": float(rerank_score), + "retriever_score": retr_score, + }) + if len(out) >= k: + break + return out + + # Baseline ranker (unchanged behaviour when reranker disabled) + top = np.argsort(-sims)[:k * 3] + out2: list[dict] = [] + seen_per_doc2: dict[str, int] = {} + for i in top: + if sims[i] < min_score: + continue + c = idx["chunks"][i] + if seen_per_doc2.get(c.doc_id, 0) >= 1: + continue + seen_per_doc2[c.doc_id] = 1 + out2.append({ + "doc_id": c.doc_id, + "title": c.title, + "citation": c.citation, + "file": c.file, + "page": c.page, + "text": c.text, + "score": float(sims[i]), + }) + if len(out2) >= k: + break + return out2 diff --git a/app/reconcile.py b/app/reconcile.py new file mode 100644 index 0000000000000000000000000000000000000000..a8ad8a45bd008f6d80eb442de3b04850e66b011d --- /dev/null +++ b/app/reconcile.py @@ -0,0 +1,1213 @@ +"""Document-grounded reconciliation via Granite 4.1 (local Ollama). + +Uses Granite 4.1's native grounded-generation interface: each specialist +that produced data becomes a separate message with role="document <doc_id>". +Ollama's chat template lifts those into the model's `documents` system +block and prepends IBM's official grounded-generation system prompt. + +Specialists that didn't fire emit nothing — silence over confabulation. +The model is post-trained to refuse to ground on absent documents. + +A server-side post-check verifies every numeric token in the output appears +verbatim in the source documents. Sentences with ungrounded numbers are +dropped from the rendered paragraph (still recorded in the trace as +unverified for audit). This is the cheapest reliable guardrail against +the worst hallucination class — fabricated stats — and it's deterministic. +""" +from __future__ import annotations + +import logging +import os +import re +from typing import Any + +from app import llm +from app.context import npcc4_slr + +log = logging.getLogger("riprap.reconcile") + +# Reconciliation is the synthesis step — citation discipline + structured +# output adherence both improve materially with the 8b variant. +# RIPRAP_RECONCILER_MODEL is the canonical name; RIPRAP_OLLAMA_MODEL is +# kept as a back-compat fallback. Default is now 8b on production +# deployments (HF Space ships granite4.1:8b in the container). 
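+# Precedence sketch: RIPRAP_RECONCILER_MODEL wins if set, else
+# RIPRAP_OLLAMA_MODEL, else the literal default below, e.g.:
+#   RIPRAP_RECONCILER_MODEL=granite4.1:8b docker compose up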
+OLLAMA_MODEL = os.environ.get("RIPRAP_RECONCILER_MODEL", + os.environ.get("RIPRAP_OLLAMA_MODEL", "granite4.1:8b")) + +CITATION_NOAA_TIDES = ("NOAA CO-OPS Tides & Currents API " + "(api.tidesandcurrents.noaa.gov), 6-min cadence") +CITATION_NWS_ALERTS = ("NWS Public Alerts API (api.weather.gov/alerts/active), " + "filtered to flood-relevant event types") +CITATION_NWS_OBS = ("NWS Station Observations API " + "(api.weather.gov/stations/{station_id}/observations/latest)") +CITATION_TTM_FORECAST = ( + "Granite TimeSeries TTM r2 (Ekambaram et al. 2024, NeurIPS) — " + "ibm-granite/granite-timeseries-ttm-r2 via granite-tsfm. " + "Zero-shot forecast of the surge residual (observed minus astronomical " + "tide) at the Battery, NY (NOAA station 8518750). 6-min cadence, " + "~51 h context, ~9.6 h horizon." +) + +# Metadata table for every doc_id the system may emit. Used to build the +# citations array in the final SSE event so the frontend can enrich each +# numbered citation with source/title/url/vintage without re-parsing. +_DOC_META: dict[str, dict] = { + "geocode": {"source": "NYC DCP Geosearch", "title": "Address geocode", "url": "https://geosearch.planninglabs.nyc"}, + "sandy": {"source": "NYC OEM / FEMA", "title": "Sandy Inundation Zone (2012)", "url": "https://data.cityofnewyork.us/Public-Safety/Hurricane-Sandy-Inundation-Zone/5xsi-dfpx"}, + "dep_stormwater": {"source": "NYC DEP", "title": "DEP Stormwater Flood Maps", "url": "https://data.cityofnewyork.us/Environment/DEP-Stormwater-Flood-Projections-Data/d73m-mf6p"}, + "dep_moderate_current": {"source": "NYC DEP", "title": "DEP Stormwater — Moderate Current", "url": "https://data.cityofnewyork.us/Environment/DEP-Stormwater-Flood-Projections-Data/d73m-mf6p"}, + "dep_extreme_2080": {"source": "NYC DEP", "title": "DEP Stormwater — Extreme 2080", "url": "https://data.cityofnewyork.us/Environment/DEP-Stormwater-Flood-Projections-Data/d73m-mf6p"}, + "ida_hwm": {"source": "USGS STN", "title": "Hurricane Ida 2021 High-Water Marks", "url": "https://stn.wim.usgs.gov/FEV/#IdaAug2021"}, + "prithvi_water": {"source": "msradam/Prithvi-EO-2.0-NYC-Pluvial", "title": "Prithvi-EO 2.0 Ida flood polygons", "url": "https://huggingface.co/msradam/Prithvi-EO-2.0-NYC-Pluvial", "vintage": "2021-09"}, + "microtopo": {"source": "USGS 3DEP", "title": "LiDAR microtopography (HAND/TWI)", "url": "https://www.usgs.gov/3d-elevation-program", "vintage": "2018"}, + "floodnet": {"source": "FloodNet NYC", "title": "FloodNet ultrasonic depth sensors", "url": "https://api.floodnet.nyc"}, + "nyc311": {"source": "NYC 311", "title": "311 flood-related complaints", "url": "https://data.cityofnewyork.us/Social-Services/311-Service-Requests-from-2010-to-Present/erm2-nwe9"}, + "noaa_tides": {"source": "NOAA CO-OPS", "title": "Battery tide gauge water level", "url": "https://tidesandcurrents.noaa.gov/stationhome.html?id=8518750"}, + "nws_alerts": {"source": "NWS", "title": "Active NWS flood alerts", "url": "https://api.weather.gov"}, + "nws_obs": {"source": "NWS ASOS", "title": "NWS hourly precipitation observations", "url": "https://api.weather.gov"}, + "ttm_forecast": {"source": "IBM Granite TTM r2", "title": "Battery surge residual nowcast", "url": "https://huggingface.co/ibm-granite/granite-timeseries-ttm-r2"}, + "ttm_311_forecast": {"source": "IBM Granite TTM r2", "title": "NYC 311 weekly flood forecast", "url": "https://huggingface.co/ibm-granite/granite-timeseries-ttm-r2"}, + "floodnet_forecast":{"source": "FloodNet + IBM Granite TTM r2", "title": "FloodNet sensor recurrence forecast", "url": 
"https://huggingface.co/ibm-granite/granite-timeseries-ttm-r2"}, + "ttm_battery": {"source": "msradam/Granite-TTM-r2-Battery-Surge", "title": "Battery surge fine-tune forecast", "url": "https://huggingface.co/msradam/Granite-TTM-r2-Battery-Surge"}, + "npcc4_slr": {"source": "NPCC4 (2024)", "title": "NYC sea-level rise projections, Battery gauge", "url": "https://nyas.org/npcc4", "vintage": "2024-03"}, + "mta": {"source": "MTA Open Data", "title": "MTA subway entrance flood exposure", "url": "https://data.ny.gov/Transportation/MTA-Subway-Stations/39hk-dx4f"}, + "nycha": {"source": "NYC Open Data / NYCHA", "title": "NYCHA development flood exposure", "url": "https://data.cityofnewyork.us/Housing-Development/NYCHA-Developments/i9rv-hdr5"}, + "doe_schools": {"source": "NYC DOE", "title": "NYC public school flood exposure", "url": "https://data.cityofnewyork.us/Education/School-Point-Locations/jfju-ynrr"}, + "doh_hospitals": {"source": "NYS DOH", "title": "Hospital flood exposure (NYS DOH vn5v-hh5r)", "url": "https://health.data.ny.gov/Health/Health-Facility-General-Information/vn5v-hh5r"}, + "terramind": {"source": "msradam/TerraMind-NYC-Adapters", "title": "TerraMind land cover synthesis", "url": "https://huggingface.co/msradam/TerraMind-NYC-Adapters"}, + "terramind_lulc": {"source": "msradam/TerraMind-NYC-Adapters", "title": "TerraMind LULC classification", "url": "https://huggingface.co/msradam/TerraMind-NYC-Adapters"}, + "terramind_buildings": {"source": "msradam/TerraMind-NYC-Adapters", "title": "TerraMind building footprint analysis", "url": "https://huggingface.co/msradam/TerraMind-NYC-Adapters"}, + "prithvi_live": {"source": "msradam/Prithvi-EO-2.0-NYC-Pluvial", "title": "Prithvi-EO live pluvial flood prediction", "url": "https://huggingface.co/msradam/Prithvi-EO-2.0-NYC-Pluvial"}, + "nta_resolve": {"source": "NYC DCP", "title": "NTA polygon resolution", "url": "https://data.cityofnewyork.us/City-Government/NTA-map/d3qk-pfyz"}, + # RAG policy corpus entries (titles from app/rag.py CORPUS_META) + "rag_dep_2013": {"source": "NYC DEP", "title": "NYC DEP Wastewater Resiliency Plan (2013)"}, + "rag_nycha": {"source": "NYCHA", "title": "Flood Resilience at NYCHA — Lessons Learned"}, + "rag_coned": {"source": "Con Edison", "title": "Con Edison Climate Change Resilience Plan (2023)"}, + "rag_mta": {"source": "MTA", "title": "MTA Climate Resilience Roadmap (October 2025)"}, + "rag_comptroller":{"source": "NYC Comptroller", "title": "Is NYC Ready for Rain? 
(2024)"}, + # Neighborhood NTA variants (same source, polygon-aggregated) + "sandy_nta": {"source": "NYC OEM / FEMA", "title": "Sandy Inundation Zone — NTA coverage (2012)", "url": "https://data.cityofnewyork.us/Public-Safety/Hurricane-Sandy-Inundation-Zone/5xsi-dfpx"}, + "dep_extreme_2080_nta": {"source": "NYC DEP", "title": "DEP Stormwater — Extreme 2080, NTA coverage", "url": "https://data.cityofnewyork.us/Environment/DEP-Stormwater-Flood-Projections-Data/d73m-mf6p"}, + "dep_moderate_2050_nta": {"source": "NYC DEP", "title": "DEP Stormwater — Moderate 2050, NTA coverage", "url": "https://data.cityofnewyork.us/Environment/DEP-Stormwater-Flood-Projections-Data/d73m-mf6p"}, + "dep_moderate_current_nta": {"source": "NYC DEP", "title": "DEP Stormwater — Moderate Current, NTA coverage", "url": "https://data.cityofnewyork.us/Environment/DEP-Stormwater-Flood-Projections-Data/d73m-mf6p"}, + "nyc311_nta": {"source": "NYC 311", "title": "311 flood complaints — NTA summary", "url": "https://data.cityofnewyork.us/Social-Services/311-Service-Requests-from-2010-to-Present/erm2-nwe9"}, + "microtopo_nta": {"source": "USGS 3DEP", "title": "LiDAR microtopography — NTA aggregate (HAND/TWI)", "url": "https://www.usgs.gov/3d-elevation-program", "vintage": "2018"}, + "terramind_synthetic": {"source": "msradam/TerraMind-NYC-Adapters", "title": "TerraMind synthetic land-cover prior", "url": "https://huggingface.co/msradam/TerraMind-NYC-Adapters"}, +} + + +def citations_from_docs(doc_msgs: list[dict]) -> list[dict]: + """Build a citations list from the document messages passed to the + reconciler. Each entry has doc_id plus any available metadata. + Order matches the document order (which is Stone canonical order).""" + seen: dict[str, dict] = {} + for msg in doc_msgs: + role = msg.get("role", "") + if not role.startswith("document "): + continue + doc_id = role[len("document "):].strip() + if doc_id in seen: + continue + meta = _DOC_META.get(doc_id, {}) + # rag_ prefixed doc_ids get a generic RAG entry + if not meta and doc_id.startswith("rag_"): + meta = {"source": "Policy corpus (RAG)", "title": doc_id.replace("rag_", "").replace("_", " ").title()} + seen[doc_id] = {"doc_id": doc_id, **meta} + return list(seen.values()) + + +# The Ollama chat template auto-prepends Granite's own grounded-generation +# system suffix once the message list contains role="document" entries. +# This text is OUR additional system prompt, prepended to that suffix. +EXTRA_SYSTEM_PROMPT = """Write a flood-exposure briefing for an NYC address. Use ONLY the facts in the provided documents. + +Output the four sections below, filling each <...> with content drawn only from the documents. **Every sentence that contains a number MUST include a citation tag — such as [sandy], [nyc311], [microtopo], [dep_extreme_2080], [floodnet], [rag_npcc4], etc. — somewhere in that sentence, using the actual document id, not a placeholder.** Cite the specific doc_id exactly as it appears in the documents list. Bold at most one phrase per section using `**...**`. Omit any section whose supporting facts are absent from the documents. + +**Status.** +. + +**Empirical evidence.** +<1-3 sentences citing observed flood evidence: Sandy inundation cites [sandy], 311 complaint counts cite [nyc311], FloodNet sensor readings cite [floodnet], Ida high-water marks cite [ida_hwm], Prithvi flood polygons cite [prithvi_water]>. + +**Modeled scenarios.** +<1-2 sentences citing modeled flooding from the dep_* documents and terrain from [microtopo] (HAND, TWI, percentile)>. 
+ +**Policy context.** +<1 sentence per RAG document hit, citing the agency name and the rag_* doc_id exactly as given>. + +Constraints: +- Copy numerical values verbatim from documents. Do not round. +- Name a specific weather event only if a document explicitly applies it to this address. +- For RAG documents (doc_ids starting with rag_): describe what the report SAYS at the policy or asset-class level. Do not assert findings the report did not make about this specific address. +- Microtopo percentile direction: a LOW percentile means topographic LOW POINT (water pools); HIGH percentile means HIGH GROUND. State the direction correctly or omit the percentile. +- Do NOT write "[doc_id]" literally — always replace it with the real document id. +- If no documents are present, output exactly: No grounded data available for this address. +""" + + +# ---- Hallucination guardrail: numeric grounding post-check ----------------- + +# Numbers must be preceded by whitespace, start-of-string, or punctuation +# OTHER than '-'. This prevents `Extreme-2080` from being parsed as the +# negative number `-2080` (the hyphen is a word separator, not a sign). +_NUM_RE = re.compile(r"(?:(?<=^)|(?<=[\s(\[/]))-?\d[\d,]*(?:\.\d+)?") +_SENTENCE_END_RE = re.compile(r"(?<=[.!?])\s+(?=[A-Z\[])") +# Strings that are too generic OR are well-known NYC system names rather +# than measurements (311, 911 are city service lines, not values). +_TRIVIAL_NUMS = {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "100", + "311", "911", "211"} + + +def _normalize_num(s: str) -> set[str]: + """A numeric value can appear in a document with or without commas, with + or without trailing zeros. Return a small set of plausible string + representations to substring-search for.""" + forms = {s} + no_comma = s.replace(",", "") + forms.add(no_comma) + if "." in no_comma: + forms.add(no_comma.rstrip("0").rstrip(".")) + return {f for f in forms if f} + + +def _docs_corpus(doc_msgs: list[dict]) -> str: + """Join all document message contents into one big haystack we + substring-search for numeric claims. Doc ids ride on the role field, + not the content, so identifiers like dep_extreme_2080 never leak + their digits into the haystack.""" + return "\n".join(m.get("content", "") for m in doc_msgs) + + +# Recognise structured-output section headers like `**Status.**` on their +# own line. These are NOT sentences and are kept verbatim. +_SECTION_HEADER_RE = re.compile(r"^\s*\*\*[A-Z][A-Za-z\s/]+\.\*\*\s*$", re.MULTILINE) + +# Granite sometimes emits the four headers inline rather than on their own +# lines (e.g. `**Status.** This address ... **Empirical evidence.** ...`). +# Normalise to one-per-line so the section-renderer regex matches. +_KNOWN_SECTION_HEADERS = ["Status", "Empirical evidence", "Modeled scenarios", + "Policy context"] +_INLINE_HEADER_RE = re.compile( + r"\*\*(" + "|".join(re.escape(h) for h in _KNOWN_SECTION_HEADERS) + r")\.\*\*" +) + + +def _split_inline_headers(text: str) -> str: + """Inject a newline before each `**Header.**` so headers sit on their own + line. The render path and verifier both depend on this.""" + text = _INLINE_HEADER_RE.sub(lambda m: f"\n**{m.group(1)}.**\n", text) + text = re.sub(r"\n{3,}", "\n\n", text) + return text.strip() + + +def _strip_markdown(text: str) -> str: + """Remove bold markers and citation tags so the numeric scan operates on + raw content. 
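+    Example:
+        >>> _strip_markdown("**Sandy** flooded this block in 2012 [sandy].")
+        'Sandy flooded this block in 2012 .'
+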
Used only for the haystack-substring check, not the rendered + output.""" + text = re.sub(r"\*\*([^*]+)\*\*", r"\1", text) # **bold** -> bold + text = re.sub(r"\[[a-z0-9_]+\]", "", text, flags=re.I) # drop [doc_id] + return text + + +def verify_paragraph(paragraph: str, doc_msgs: list[dict]) -> tuple[str, list[dict]]: + """Drop sentences whose numeric tokens don't appear in any source doc. + + Section-header lines (e.g. `**Status.**`) and inline bold (`**foo**`) + are preserved verbatim; the verifier strips them only for the + numeric-grounding check. Sentences are split on sentence-end + punctuation followed by whitespace + a capital letter or '['. + + Returns (clean_paragraph, dropped_sentences_with_reason). + """ + paragraph = _split_inline_headers(paragraph) + haystack = _docs_corpus(doc_msgs) + out_blocks: list[str] = [] + dropped: list[dict] = [] + body_buf: list[str] = [] + + def flush_body(): + if not body_buf: + return + body = " ".join(body_buf).strip() + body_buf.clear() + if not body: + return + sentences = _SENTENCE_END_RE.split(body) + kept_sents: list[str] = [] + for sent in sentences: + sent_stripped = sent.strip() + if not sent_stripped: + continue + sent_clean = _strip_markdown(sent_stripped) + nums = _NUM_RE.findall(sent_clean) + ungrounded = [] + for n in nums: + if n in _TRIVIAL_NUMS: + continue + forms = _normalize_num(n) + if not any(f in haystack for f in forms): + ungrounded.append(n) + if ungrounded: + dropped.append({"sentence": sent_stripped, + "ungrounded_numbers": ungrounded}) + log.warning("dropped ungrounded sentence: %r (nums: %s)", + sent_stripped, ungrounded) + continue + kept_sents.append(sent_stripped) + if kept_sents: + out_blocks.append(" ".join(kept_sents)) + + for line in paragraph.splitlines(): + if _SECTION_HEADER_RE.match(line): + flush_body() + out_blocks.append(line.strip()) + else: + body_buf.append(line.strip()) + flush_body() + + cleaned = "\n".join(b for b in out_blocks if b).strip() + if not cleaned: + cleaned = "Could not produce a verifiable summary; see the data panels." + return cleaned, dropped + + +def _doc_message(doc_id: str, body_lines: list[str]) -> dict: + """One Granite-native document message. The doc_id rides on the role + suffix; Ollama's template uses it as the document title and lifts the + pair into the `documents` block.""" + return {"role": f"document {doc_id}", "content": "\n".join(body_lines)} + + +def trim_docs_to_plan(doc_msgs: list[dict], + planned_specialists: set[str] | None) -> list[dict]: + """Drop document messages whose doc_id family wasn't in the planner's + specialist list. + + The FSM's parallel fan-out runs every specialist regardless of what + the planner asked for; this lets the user see all the data come in + via the trace + map. But for the reconciler we want only what the + planner judged relevant, both to cut prompt tokens (≈30-50% on + typical single_address queries) and to keep the briefing focused. 
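+    Example: planned_specialists={"sandy", "rag"} keeps the "sandy"
+    doc plus every "rag_*" doc, along with the always-kept geocode /
+    scope_note / nta_resolve documents; all other document messages
+    are dropped.
+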
+ + Doc IDs are mapped to specialist family prefixes: + sandy -> {sandy} + dep_stormwater -> {dep_*} + floodnet -> {floodnet} + nyc311 -> {nyc311} + microtopo -> {microtopo} + mta_entrances -> {mta_entrance_*} + nycha_developments -> {nycha_dev_*} + doe_schools -> {doe_school_*} + doh_hospitals -> {nyc_hospital_*} # historical id naming + ida_hwm -> {ida_hwm} + prithvi_water -> {prithvi_water} + noaa_tides -> {noaa_tides} + nws_alerts -> {nws_alerts} + nws_obs -> {nws_obs} + ttm_forecast -> {ttm_forecast} + ttm_311_forecast -> {ttm_311_forecast} + floodnet_forecast -> {floodnet_forecast_*} + terramind -> {terramind_*, syn_*} + rag -> {rag_*} + nta_resolve -> {nta_resolve, nta_*} + dob_permits -> {dob_*} + + Always preserved (never trimmed): + geocode, scope_note, nta_resolve — they orient the briefing or + gate scope and the planner doesn't always name them explicitly. + + Set RIPRAP_TRIM_DOCS=0 to disable (defaults on). + """ + import os as _os # local import to keep module top tidy + if not planned_specialists or not doc_msgs: + return doc_msgs + if _os.environ.get("RIPRAP_TRIM_DOCS", "1").lower() in ("0", "false", "no"): + return doc_msgs + + # Build the allowed-prefix set from the planner's specialists. + PREFIXES_BY_SPECIALIST: dict[str, tuple[str, ...]] = { + "sandy": ("sandy",), + "dep_stormwater": ("dep_",), + "floodnet": ("floodnet",), + "nyc311": ("nyc311",), + "microtopo": ("microtopo",), + "ida_hwm": ("ida_hwm",), + "prithvi_water": ("prithvi_water",), + "noaa_tides": ("noaa_tides",), + "nws_alerts": ("nws_alerts",), + "nws_obs": ("nws_obs",), + "ttm_forecast": ("ttm_forecast",), + "ttm_311_forecast": ("ttm_311_forecast",), + "floodnet_forecast": ("floodnet_forecast",), + "ttm_battery_surge": ("ttm_battery",), + "terramind": ("terramind", "syn_"), + "terramind_lulc": ("tm_lulc",), + "terramind_buildings": ("tm_buildings",), + "rag": ("rag_",), + "rag_mta": ("rag_",), + "nta_resolve": ("nta_resolve", "nta_"), + "dob_permits": ("dob_",), + "mta_entrances": ("mta_entrance",), + "nycha_developments": ("nycha_dev",), + "doe_schools": ("doe_school", "nyc_school"), + "doh_hospitals": ("doh_hospital", "nyc_hospital"), + } + ALWAYS_KEEP = ("geocode", "scope_note", "nta_resolve") + + allowed_prefixes: set[str] = set() + for spec in planned_specialists: + for p in PREFIXES_BY_SPECIALIST.get(spec, ()): + allowed_prefixes.add(p) + if not allowed_prefixes: + return doc_msgs # planner gave us nothing matchable; bail safely + + kept: list[dict] = [] + for m in doc_msgs: + role = m.get("role", "") + if not role.startswith("document "): + kept.append(m) + continue + doc_id = role[len("document "):].strip() + if doc_id.startswith(ALWAYS_KEEP): + kept.append(m) + continue + if any(doc_id.startswith(p) for p in allowed_prefixes): + kept.append(m) + return kept + + +def build_documents(state: dict[str, Any]) -> list[dict]: + """Build Granite-native document-role messages, gated so absent + specialists emit no document at all. + + Document emission order follows the Stones grouping: geocode preamble, + then Cornerstone (static hazard record), Keystone (asset register), + Touchstone (live sensors + EO), Lodestone (forecasts), and finally + policy-context retrieval (RAG + GLiNER) as ancillary. The grouping + is also the order they're iterated for prompt building, so the + Capstone (reconciler) sees the four data-Stones in canonical order. + + Scope guard: if the resolved address is OUTSIDE the NYC bbox, only + the geocode + live national specialists emit documents. 
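+    (Example: an out-of-NYC address yields exactly two documents —
+    scope_note, which folds in the national NOAA/NWS/TTM readings,
+    and geocode.)
+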
NYC-specific + layers (Sandy, DEP, FloodNet, NYC 311, microtopo, Ida HWMs, Prithvi, + NYC RAG corpus) are suppressed and a `scope_note` doc is added telling + the reconciler not to invoke NYC content. + """ + docs: list[dict] = [] + + geo = state.get("geocode") or {} + NYC_S, NYC_W, NYC_N, NYC_E = 40.49, -74.27, 40.92, -73.69 + out_of_nyc = ( + geo.get("lat") is not None and geo.get("lon") is not None and not ( + NYC_S <= geo["lat"] <= NYC_N and NYC_W <= geo["lon"] <= NYC_E + ) + ) + + # ---- Preamble: scope_note (out-of-NYC) + geocode ------------------- + if out_of_nyc: + # Compose a single live-conditions snapshot from whatever the + # national specialists produced. This always emits when out_of_nyc, + # even on a calm day, so the reconciler has SOMETHING grounded to + # report instead of only a list of what doesn't apply. + place_label = (geo.get("borough") or geo.get("address") or + f"{geo['lat']:.4f}, {geo['lon']:.4f}") + body = [ + "Source: Riprap planner + national live specialists. Scope " + "guard: this address is OUTSIDE NYC; NYC-specific datasets " + "are not in scope at this location.", + f"Resolved location: {place_label} ({geo['lat']:.4f}, " + f"{geo['lon']:.4f}).", + ] + tides = state.get("noaa_tides") or {} + if tides.get("station_id") and tides.get("error") is None: + tline = (f"NOAA Tides & Currents — nearest gauge: " + f"{tides.get('station_name')} (NOAA " + f"{tides.get('station_id')}, " + f"{tides.get('distance_km')} km from address).") + body.append(tline) + if tides.get("observed_ft_mllw") is not None: + body.append( + f"Observed water level: {tides['observed_ft_mllw']} ft " + f"above MLLW; predicted: " + f"{tides.get('predicted_ft_mllw')} ft; residual " + f"(observed minus predicted): " + f"{tides.get('residual_ft')} ft." + ) + else: + body.append("No water-level observation reported by the " + "gauge in the last poll.") + alerts = state.get("nws_alerts") or {} + body.append( + f"NWS Public Alerts at point: {alerts.get('n_active', 0)} " + "active flood-relevant alert(s)." + ) + if alerts.get("alerts"): + for a in alerts["alerts"][:3]: + body.append( + f"- {a.get('event','?')} (severity " + f"{a.get('severity','?')}, urgency " + f"{a.get('urgency','?')}); expires " + f"{(a.get('expires') or '')[:16]}; area: " + f"{(a.get('areaDesc') or '')[:120]}." + ) + obs = state.get("nws_obs") or {} + if obs.get("station_id") and obs.get("error") is None: + line = (f"Nearest NWS ASOS: {obs.get('station_name')} " + f"({obs.get('station_id')}, " + f"{obs.get('distance_km')} km).") + body.append(line) + if obs.get("precip_last_hour_mm") is not None: + body.append( + f"Precipitation last 1 h: " + f"{obs['precip_last_hour_mm']} mm; last 6 h: " + f"{obs.get('precip_last_6h_mm')} mm." + ) + else: + body.append("No precipitation reported in the last hourly " + "observation.") + ttm = state.get("ttm_forecast") or {} + if ttm.get("available") and ttm.get("interesting"): + body.append( + f"Granite TTM r2 surge forecast at the Battery: peak " + f"residual {ttm.get('forecast_peak_ft')} ft expected in " + f"{ttm.get('forecast_peak_minutes_ahead')} minutes — note " + f"this gauge is in NYC harbor, not local to this address." 
+ ) + docs.append(_doc_message("scope_note", body)) + + if geo: + body = [ + "Source: NYC DCP Geosearch (geosearch.planninglabs.nyc).", + f"Resolved address: {geo['address']}.", + f"Borough: {geo.get('borough') or 'unknown'}.", + f"Coordinates: {geo['lat']:.5f} N, {geo['lon']:.5f} W.", + ] + if geo.get("bbl"): + body.append(f"BBL (tax-lot id): {geo['bbl']}.") + docs.append(_doc_message("geocode", body)) + + # ---- Cornerstone — The Hazard Reader ------------------------------- + # Static record of what NYC's ground remembers about flooding: the + # 2012 Sandy empirical extent, modelled DEP stormwater scenarios, + # 2021 Ida USGS high-water marks, baked Prithvi-EO Ida-attributable + # polygons, and LiDAR microtopography (elevation / HAND / TWI). + + # Gate: only emit the Sandy doc when the address is actually inside the + # 2012 extent. Granite has a strong training prior associating NYC + flood + # + Brooklyn with Sandy and will misread "outside" as "inside" if given + # the chance — silence-over-confabulation rules. + if not out_of_nyc and state.get("sandy") is True: + body = [ + "Source: NYC Sandy Inundation Zone (NYC OpenData 5xsi-dfpx, " + "empirical extent of areas flooded by Hurricane Sandy in 2012).", + "FACT: The address is LOCATED WITHIN this empirical 2012 inundation extent.", + "INTERPRETATION: Hurricane Sandy did flood this address (or this immediate parcel) on October 29-30, 2012. This is a historical fact, not a model prediction.", + "Do not state the opposite. The address is inside the Sandy inundation zone.", + ] + docs.append(_doc_message("sandy", body)) + + dep = state.get("dep") + if not out_of_nyc and dep: + for scen, info in dep.items(): + if info.get("depth_class", 0) > 0: + body = [ + f"Source: {info['citation']}.", + "Address inside scenario footprint: yes.", + f"Modeled depth class: {info['depth_label']}.", + ] + docs.append(_doc_message(scen, body)) + + ida = state.get("ida_hwm") + if not out_of_nyc and ida and (ida.get("n_within_radius") or 0) > 0: + body = [ + "Source: USGS STN Hurricane Ida 2021 high-water marks (Event 312, NY State).", + f"USGS HWMs within {ida['radius_m']} m: {ida['n_within_radius']}.", + ] + if ida.get("max_height_above_gnd_ft") is not None: + body.append(f"Max water height above ground: {ida['max_height_above_gnd_ft']} ft.") + if ida.get("max_elev_ft") is not None: + body.append(f"Max HWM elevation: {ida['max_elev_ft']} ft.") + if ida.get("nearest_dist_m") is not None: + body.append(f"Nearest HWM site: {ida['nearest_site']} ({ida['nearest_dist_m']} m away).") + docs.append(_doc_message("ida_hwm", body)) + + pw = state.get("prithvi_water") + if not out_of_nyc and pw and pw.get("nearest_distance_m") is not None: + body = [ + "Source: Prithvi-EO 2.0 (300M params, NASA/IBM, Apache-2.0). " + "Sen1Floods11 fine-tune for water/flood semantic segmentation, " + "run via TerraTorch on a real Hurricane Ida pre/post HLS Sentinel-2 " + f"pair: {pw['scene_id']} (dates: {pw['scene_date']}).", + "INTERPRETATION: the polygons are pixels classified as water in the " + "post-event scene (2021-09-02, ~12 h after Ida peak rainfall) but NOT " + "in the pre-event reference (2021-08-25). 
They are candidate " + "Ida-attributable surface inundation.", + f"Address sits inside an Ida-attributable inundation polygon: " + f"{'YES' if pw['inside_water_polygon'] else 'no'}.", + f"Distance to nearest Ida-attributable polygon: {pw['nearest_distance_m']} m.", + f"Distinct Ida-attributable polygons within 500 m: " + f"{pw['n_polygons_within_500m']}.", + "Honest scope: subway entrances and basement apartments — the dominant " + "Ida damage mode in NYC — are not visible to optical satellites. By the " + "Sep 2 16:02 UTC pass much pluvial street water had drained. The signal " + "primarily captures marsh/parkland ponding, riverside spillover, and " + "low-lying inundation that survived ~12 hours.", + ] + docs.append(_doc_message("prithvi_water", body)) + + mt = state.get("microtopo") + if not out_of_nyc and mt: + # Compute a categorical topographic position so Granite can't flip + # the directional reading of the percentile. + p200 = mt["rel_elev_pct_200m"] + if p200 < 25: + position = ("topographic LOW POINT — surface runoff in the " + "200 m neighbourhood routes toward this location") + elif p200 > 75: + position = ("RELATIVELY HIGH GROUND — most of the 200 m " + "neighbourhood is at lower elevation than this address") + else: + position = ("MID-SLOPE — neither a clear low point nor high ground") + body = [ + "Source: USGS 3DEP 30 m DEM (LiDAR-derived) via py3dep, with TWI and HAND derived using whitebox-workflows hydrology toolkit.", + f"Point elevation at this address: {mt['point_elev_m']} m above sea level.", + f"Topographic position relative to surroundings: {position}.", + f"Fraction of cells within 200 m radius that are LOWER in elevation than this address: {mt['rel_elev_pct_200m']}%.", + f"Fraction of cells within 750 m radius that are LOWER in elevation than this address: {mt['rel_elev_pct_750m']}%.", + f"Basin relief (max elevation in 750 m AOI minus address elevation): {mt['basin_relief_m']} m.", + ] + if mt.get("hand_m") is not None: + hand_v = mt["hand_m"] + hand_interp = ( + "very low (sub-meter) — the address sits at or near drainage level" + if hand_v < 1.0 else + "low (1-3 m) — the address is close to the local drainage line" + if hand_v < 3.0 else + "moderate (3-8 m) — typical urban-block elevation above drainage" + if hand_v < 8.0 else + "high (>8 m) — the address sits well above the local drainage network" + ) + body.append( + f"Height Above Nearest Drainage (HAND): {hand_v} m. " + f"Interpretation: {hand_interp}. HAND is the standard hydrology " + f"index for vertical distance from a cell to the nearest channel; " + f"used by USGS, USACE, and InfoWorks ICM." + ) + if mt.get("twi") is not None: + twi_v = mt["twi"] + twi_interp = ( + "low — the cell sheds water; not saturation-prone" + if twi_v < 6 else + "moderate" + if twi_v < 10 else + "high — the cell tends to accumulate water" + if twi_v < 14 else + "very high — saturation-prone terrain" + ) + body.append( + f"Topographic Wetness Index (TWI): {twi_v}. " + f"Interpretation: {twi_interp}. TWI = ln(specific catchment area / tan slope) " + f"is the TOPMODEL framework's saturation propensity metric." + ) + docs.append(_doc_message("microtopo", body)) + + # ---- Keystone — The Asset Register --------------------------------- + # Per-asset documents for transit, housing, education, healthcare, and + # the TerraMind synthetic-prior land-cover (slated to be replaced by + # the NYC-Buildings LoRA in a later commit). Each register specialist + # emits one doc per asset so the reconciler can cite specifically + # (e.g. 
[mta_entrance_54], [nycha_dev_004]). Caps keep the total + # payload bounded; specialists already truncated to their per-query + # maxes. + mta = state.get("mta_entrances") + if not out_of_nyc and mta and mta.get("available"): + for e in mta.get("entrances", [])[:6]: + sid = e.get("station_id") + body = [ + "Source: MTA Open Data subway entrances " + "+ NYC OEM Sandy 2012 Inundation Zone (5xsi-dfpx) " + "+ NYC DEP Stormwater Flood Maps + USGS 3DEP DEM.", + (f"Station {e.get('station_name')} ({e.get('daytime_routes')}), " + f"entrance type {e.get('entrance_type')}, " + f"{e.get('distance_m')} m from query."), + (f"Entrance elevation {e.get('elevation_m')} m, " + f"HAND (height above nearest drainage) {e.get('hand_m')} m."), + ] + if e.get("inside_sandy_2012"): + body.append("This entrance is inside the 2012 Sandy " + "Inundation Zone (empirical).") + else: + body.append("This entrance is NOT inside the 2012 Sandy " + "Inundation Zone.") + if (e.get("dep_extreme_2080_class") or 0) > 0: + body.append( + f"NYC DEP Extreme-2080 scenario: " + f"{e.get('dep_extreme_2080_label')}.") + if (e.get("dep_moderate_2050_class") or 0) > 0: + body.append( + f"NYC DEP Moderate-2050 scenario: " + f"{e.get('dep_moderate_2050_label')}.") + body.append("ADA-accessible (heuristic from entrance_type): " + f"{'yes' if e.get('ada_accessible') else 'no'}.") + docs.append(_doc_message(f"mta_entrance_{sid}", body)) + + nycha = state.get("nycha_developments") + if not out_of_nyc and nycha and nycha.get("available"): + for d in nycha.get("developments", [])[:4]: + tds = d.get("tds_num") + body = [ + "Source: pre-computed from NYC Open Data NYCHA Developments " + "(phvi-damg) joined to NYC OEM Sandy 2012 Inundation Zone " + "(5xsi-dfpx) + NYC DEP Stormwater Flood Maps + USGS 3DEP DEM.", + (f"NYCHA development {d.get('development')} (TDS {tds}, " + f"{d.get('borough')}), {d.get('distance_m')} m from query."), + (f"Representative-point elevation {d.get('rep_elevation_m')} m, " + f"HAND {d.get('rep_hand_m')} m."), + ] + if d.get("inside_sandy_2012"): + body.append( + "Centroid is inside the 2012 Sandy Inundation Zone " + "(empirical).") + else: + body.append( + "Centroid is outside the 2012 Sandy Inundation Zone.") + c2080 = d.get("dep_extreme_2080_class") or 0 + if c2080 > 0: + body.append( + f"DEP Extreme-2080 scenario at this development: " + f"{d.get('dep_extreme_2080_label')} (depth class {c2080}).") + c2050 = d.get("dep_moderate_2050_class") or 0 + if c2050 > 0: + body.append( + f"DEP Moderate-2050 scenario at this development: " + f"{d.get('dep_moderate_2050_label')} (depth class {c2050}).") + docs.append(_doc_message(f"nycha_dev_{tds}", body)) + + schools = state.get("doe_schools") + if not out_of_nyc and schools and schools.get("available"): + for s in schools.get("schools", [])[:5]: + lc = s.get("loc_code") + body = [ + "Source: NYC DOE Locations Points " + "+ NYC OEM Sandy 2012 Inundation Zone (5xsi-dfpx) " + "+ NYC DEP Stormwater Flood Maps + USGS 3DEP DEM.", + (f"School {s.get('loc_name')} ({lc}, {s.get('address')}, " + f"{s.get('borough')}), {s.get('distance_m')} m from query."), + (f"School-point elevation {s.get('elevation_m')} m, " + f"HAND {s.get('hand_m')} m."), + ] + if s.get("inside_sandy_2012"): + body.append("This school is inside the 2012 Sandy " + "Inundation Zone (empirical).") + else: + body.append("This school is NOT inside the 2012 Sandy " + "Inundation Zone (centroid-point join; " + "building-footprint join is a documented " + "follow-up).") + if (s.get("dep_extreme_2080_class") or 0) > 0: + 
body.append( + f"NYC DEP Extreme-2080 scenario: " + f"{s.get('dep_extreme_2080_label')}.") + if (s.get("dep_moderate_2050_class") or 0) > 0: + body.append( + f"NYC DEP Moderate-2050 scenario: " + f"{s.get('dep_moderate_2050_label')}.") + docs.append(_doc_message(f"doe_school_{lc}", body)) + + hospitals = state.get("doh_hospitals") + if not out_of_nyc and hospitals and hospitals.get("available"): + for h in hospitals.get("hospitals", [])[:4]: + fid = h.get("fac_id") + body = [ + "Source: NYS DOH Health Facility Certification (vn5v-hh5r) " + "+ NYC OEM Sandy 2012 Inundation Zone (5xsi-dfpx) " + "+ NYC DEP Stormwater Flood Maps + USGS 3DEP DEM.", + (f"Hospital {h.get('facility_name')} (NYS DOH facility " + f"{fid}, {h.get('address')}, {h.get('borough')}), " + f"operator {h.get('operator_name')}, " + f"ownership {h.get('ownership_type')}, " + f"{h.get('distance_m')} m from query."), + (f"Hospital-point elevation {h.get('elevation_m')} m, " + f"HAND {h.get('hand_m')} m."), + ] + if h.get("inside_sandy_2012"): + body.append("This hospital is inside the 2012 Sandy " + "Inundation Zone (empirical).") + else: + body.append("This hospital is NOT inside the 2012 Sandy " + "Inundation Zone (centroid-point join; " + "building-footprint join is a documented " + "follow-up).") + if (h.get("dep_extreme_2080_class") or 0) > 0: + body.append( + f"NYC DEP Extreme-2080 scenario: " + f"{h.get('dep_extreme_2080_label')}.") + if (h.get("dep_moderate_2050_class") or 0) > 0: + body.append( + f"NYC DEP Moderate-2050 scenario: " + f"{h.get('dep_moderate_2050_label')}.") + docs.append(_doc_message(f"nyc_hospital_{fid}", body)) + + # TerraMind synthetic-prior — explicitly fourth epistemic class + # alongside empirical / modeled / proxy. Reconciler narration must + # frame this as "TerraMind generated a plausible land-cover map from + # terrain context", never "imaged" or "reconstructed". Class labels + # are tentative against ESRI Land Cover 2020-2022 schema. Slated for + # replacement by the NYC-Buildings LoRA in a later migration commit. + tm = state.get("terramind") + if not out_of_nyc and tm and tm.get("ok"): + body = [ + "Source: TerraMind 1.0 base (IBM/ESA, Apache-2.0) any-to-any " + "generative foundation model. This is a SYNTHETIC PRIOR, " + "not a measurement: TerraMind generates plausible categorical " + "land-cover maps from terrain context, never observations.", + f"Chain: {' -> '.join(tm.get('tim_chain') or ['DEM', 'LULC_synthetic'])}.", + f"Diffusion steps: {tm.get('diffusion_steps', '?')}.", + f"Diffusion seed (reproducibility): {tm.get('diffusion_seed', '?')}.", + f"Input DEM mean elevation at this address: " + f"{tm.get('dem_mean_m', 0):.2f} m (NYC 30 m LiDAR raster).", + f"Label schema: {tm.get('label_schema', 'ESRI Land Cover, tentative')}.", + f"Dominant synthetic land-cover class: " + f"{tm.get('dominant_class_display') or tm.get('dominant_class', 'unknown')} at " + f"{tm.get('dominant_pct', 0):.1f}% of the 5 km area.", + f"Synthetic class fractions ({tm.get('n_classes_observed', 0)} " + f"classes observed):", + ] + for label, pct in (tm.get("class_fractions") or {}).items(): + body.append(f" - {label}: {pct:.1f}%") + body.extend([ + "synthetic_modality: true", + "Use only the careful framing 'TerraMind generated a " + "plausible synthetic land-cover prior from the terrain " + "context, with class labels tentatively aligned to ESRI " + "schema'. 
Do NOT claim measurement, imaging, observation, " + "or reconstruction.", + ]) + docs.append(_doc_message("terramind_synthetic", body)) + + # TerraMind-NYC Buildings adapter (msradam/TerraMind-NYC-Adapters, + # Apache-2.0, fine-tuned on NYC building footprints on AMD MI300X). + # Distinct from the synthetic-prior block above — this is a real + # segmentation against the per-query Sentinel-2/1/DEM chip and + # reports an empirical building-footprint area fraction. + tmb = state.get("terramind_buildings") + if not out_of_nyc and tmb and tmb.get("ok"): + body = [ + "Source: msradam/TerraMind-NYC-Adapters (Apache-2.0) — NYC " + "Buildings LoRA on TerraMind 1.0 base, fine-tuned on AMD " + "Instinct MI300X. Test mIoU 0.5511 on held-out NYC chips.", + f"Adapter: {tmb.get('adapter')}.", + f"Predicted building-footprint coverage in chip: " + f"{tmb.get('pct_buildings')}%.", + ] + if tmb.get("n_building_components") is not None: + body.append( + f"Distinct building connected components: " + f"{tmb.get('n_building_components')}." + ) + body.append( + "Class labels: " + ", ".join(tmb.get("class_labels") or []) + + "." + ) + docs.append(_doc_message("tm_buildings", body)) + + # ---- Touchstone — The Live Observer -------------------------------- + # Live sensors and per-query EO that change minute to minute: + # FloodNet ultrasonic depth, NYC 311 flood complaints, NWS hourly + # METAR observations, NOAA tide-gauge water levels, Prithvi-EO + # live water segmentation. The reconciler treats these as right-now + # context, not historical record. + fn = state.get("floodnet") + if not out_of_nyc and fn and fn.get("n_sensors", 0) > 0: + body = [ + "Source: FloodNet NYC ultrasonic depth sensor network (api.floodnet.nyc).", + f"Sensors within {fn['radius_m']} m: {fn['n_sensors']}.", + f"Sensors with labeled flood events in last 3 years: {fn['n_sensors_with_events']}.", + f"Total flood events at those sensors: {fn['n_flood_events_3y']}.", + ] + peak = fn.get("peak_event") + if peak and peak.get("max_depth_mm") is not None: + ts = (peak.get("start_time") or "")[:10] + body.append( + f"Peak event: {peak['max_depth_mm']} mm depth at sensor " + f"{peak['deployment_id']} starting {ts}." 
+ ) + docs.append(_doc_message("floodnet", body)) + + nyc311 = state.get("nyc311") + if not out_of_nyc and nyc311 and nyc311.get("n", 0) > 0: + body = [ + "Source: NYC 311 service requests (Socrata erm2-nwe9, 2010-present).", + f"311 flood-related complaints within {nyc311['radius_m']} m, last {nyc311['years']} years: {nyc311['n']}.", + ] + if nyc311.get("by_descriptor"): + top = "; ".join(f"{k}: {v}" for k, v in nyc311["by_descriptor"].items()) + body.append(f"Top descriptors and counts: {top}.") + if nyc311.get("by_year"): + yrs = ", ".join(f"{y}: {n}" for y, n in nyc311["by_year"].items()) + body.append(f"Per-year counts: {yrs}.") + docs.append(_doc_message("nyc311", body)) + + obs = state.get("nws_obs") + if not out_of_nyc and obs and obs.get("station_id") and obs.get("error") is None and ( + obs.get("precip_last_hour_mm") is not None or + obs.get("precip_last_6h_mm") is not None + ): + body = [ + f"Source: {CITATION_NWS_OBS}.", + f"Nearest hourly METAR station: {obs['station_name']} ({obs['station_id']}, " + f"{obs['distance_km']} km away).", + f"Observation time: {obs.get('obs_time') or 'unknown'}.", + ] + if obs.get("precip_last_hour_mm") is not None: + body.append(f"Precipitation last 1 h: {obs['precip_last_hour_mm']} mm.") + if obs.get("precip_last_3h_mm") is not None: + body.append(f"Precipitation last 3 h: {obs['precip_last_3h_mm']} mm.") + if obs.get("precip_last_6h_mm") is not None: + body.append(f"Precipitation last 6 h: {obs['precip_last_6h_mm']} mm.") + body.append( + "Heavy short-duration rainfall (e.g. >25 mm/h or >50 mm/6 h) is the " + "primary driver of NYC pluvial / sewer-backup flooding; the static " + "DEP scenarios assume specific rainfall intensities." + ) + docs.append(_doc_message("nws_obs", body)) + + tides = state.get("noaa_tides") + if not out_of_nyc and tides and tides.get("observed_ft_mllw") is not None: + body = [ + f"Source: {CITATION_NOAA_TIDES}.", + f"Nearest tide gauge: {tides['station_name']} (NOAA station " + f"{tides['station_id']}, {tides['distance_km']} km away).", + f"Observation time (LST/LDT): {tides.get('obs_time') or 'unknown'}.", + f"Current observed water level above MLLW: {tides['observed_ft_mllw']} ft.", + ] + if tides.get("predicted_ft_mllw") is not None: + body.append( + f"Astronomical tide prediction at the same instant: " + f"{tides['predicted_ft_mllw']} ft above MLLW." + ) + if tides.get("residual_ft") is not None: + interp = ( + "approximately at predicted level" + if abs(tides["residual_ft"]) < 0.5 else + "elevated above prediction (positive residual is consistent with " + "wind-driven setup or storm surge)" + if tides["residual_ft"] > 0 else + "below prediction (negative residual is consistent with offshore wind)" + ) + body.append( + f"Residual (observed minus predicted): {tides['residual_ft']} ft — " + f"{interp}." + ) + body.append( + "Note: this is real-time tidal context for nearby coastal water level. " + "The address itself may be inland — the reading describes the bay/harbor " + "level the gauge is in, not the address." + ) + docs.append(_doc_message("noaa_tides", body)) + + # Per-query Sentinel-2 water-segmentation observation. Distinct from + # `prithvi_water` (the offline 2021 Ida polygons in the Cornerstone + # group) — this one fires against today's imagery and emits a dated + # observation. 
+ plive = state.get("prithvi_live") + if not out_of_nyc and plive and plive.get("ok"): + body = [ + "Source: msradam/Prithvi-EO-2.0-NYC-Pluvial (Apache-2.0) — " + "NYC-Pluvial v2 fine-tune of Prithvi-EO 2.0 trained on AMD " + "Instinct MI300X via AMD Developer Cloud (test flood IoU " + "0.5979). Live segmentation over a Sentinel-2 L2A scene " + "from Microsoft Planetary Computer.", + f"Sentinel-2 scene id: {plive.get('item_id', 'unknown')}.", + f"Observation date: {(plive.get('item_datetime') or 'unknown')[:10]}.", + f"Cloud cover: {plive.get('cloud_cover', 0):.3f}%.", + f"% water within 500 m of address: " + f"{plive.get('pct_water_within_500m', 0):.2f}.", + f"% water across 5 km chip: " + f"{plive.get('pct_water_full', 0):.2f}.", + ] + docs.append(_doc_message("prithvi_live", body)) + + # TerraMind-NYC LULC adapter — current 5-class macro land-cover from + # the per-query Sentinel-2/1/DEM chip. Empirical observation, not the + # synthetic-prior emitted by the legacy `terramind_synthetic` doc. + tml = state.get("terramind_lulc") + if not out_of_nyc and tml and tml.get("ok"): + body = [ + "Source: msradam/TerraMind-NYC-Adapters (Apache-2.0) — NYC " + "LULC LoRA on TerraMind 1.0 base, fine-tuned on AMD " + "Instinct MI300X. Test mIoU 0.5866 on held-out NYC chips.", + f"Adapter: {tml.get('adapter')}.", + f"Dominant land-cover class in chip: " + f"{tml.get('dominant_class')} at {tml.get('dominant_pct')}%.", + "Per-class fractions:", + ] + for label, pct in (tml.get("class_fractions") or {}).items(): + body.append(f" - {label}: {pct}%") + docs.append(_doc_message("tm_lulc", body)) + + # ---- Lodestone — The Projector ------------------------------------- + # Forward-looking signals: NWS public flood alerts, Granite TTM r2 + # zero-shot Battery surge residual, per-address NYC 311 weekly rate, + # FloodNet sensor recurrence. Every cited number here is a forecast. + alerts = state.get("nws_alerts") or {} + active = alerts.get("alerts") or [] + if not out_of_nyc and active: + body = [ + f"Source: {CITATION_NWS_ALERTS}.", + f"Active flood-relevant alerts at this address right now: {len(active)}.", + ] + for a in active[:4]: + body.append( + f"- {a.get('event','(event)')} (severity: {a.get('severity','?')}, " + f"urgency: {a.get('urgency','?')}); issued {a.get('sent','')[:16]}, " + f"expires {a.get('expires','')[:16]}; " + f"sender: {a.get('sender_name','NWS')}; " + f"area: {(a.get('areaDesc') or '')[:120]}." + ) + if a.get("headline"): + body.append(f" Headline (verbatim): {a['headline'][:240]}") + body.append( + "These are official NWS alerts retrieved live; if any FLOOD or " + "FLASH FLOOD WARNING/WATCH is in this list, it applies to the " + "address right now and should be foregrounded." 
+ ) + docs.append(_doc_message("nws_alerts", body)) + + ttm = state.get("ttm_forecast") + if not out_of_nyc and ttm and ttm.get("available") and ttm.get("interesting"): + body = [ + f"Source: {CITATION_TTM_FORECAST}.", + f"Gauge: {ttm['station_name']} (NOAA {ttm['station_id']}, " + f"{ttm.get('distance_km', '?')} km from address — closest of " + "Battery / Kings Point / Sandy Hook).", + f"Context window: {ttm['context_length']} samples (~" + f"{ttm['context_length']*6/60:.1f} h of 6-min residual).", + f"Forecast horizon: {ttm['horizon_steps']} samples (~" + f"{ttm['horizon_steps']*6/60:.1f} h ahead).", + f"Recent residual: {ttm['history_recent_ft']} ft " + f"(residual = observed water level minus astronomical prediction).", + f"Recent peak |residual| in context: {ttm['history_peak_abs_ft']} ft.", + f"Forecast peak residual: {ttm['forecast_peak_ft']} ft, expected " + f"{ttm['forecast_peak_minutes_ahead']} minutes from now " + f"(at {ttm['forecast_peak_time_utc']} UTC).", + "INTERPRETATION: positive residual is a wind-driven setup or " + "storm-surge component on top of the tide; the model predicts the " + "non-tidal part NOAA's astronomical predictor does not cover.", + ] + docs.append(_doc_message("ttm_forecast", body)) + + # Per-address 311 flood-complaint forecast — different time scale, + # different signal entirely. TTM r2 zero-shot on daily counts + # (~17 months of history → ~3 months of forecast). Aggregated to + # weekly for the narration since readers think in weeks. + ttm311 = state.get("ttm_311_forecast") + if not out_of_nyc and ttm311 and ttm311.get("available"): + accel = ('YES — forecast > 50% above recent 30-day baseline' + if ttm311.get('accelerating') + else 'no — forecast in line with recent baseline') + body = [ + "Source: IBM Granite TimeSeries TTM r2 (Ekambaram et al. 2024, " + "NeurIPS) zero-shot forecast on NYC 311 flood-complaint history " + "(Sewer Backup, Catch Basin Clogged/Flooding, Street Flooding, " + "Manhole Overflow) within " + f"{ttm311.get('radius_m', 200)} m of the address.", + f"Context window: {ttm311['days_context']} days " + f"({ttm311['days_context'] // 7} weeks) ending " + f"{ttm311.get('context_window_end', '?')}.", + f"Total complaints in context window: " + f"{ttm311['history_total_complaints']}.", + f"History recent 30-day rate: {ttm311['history_recent_30d_mean']} " + f"complaints/day " + f"(≈{ttm311['history_weekly_equivalent']} per week).", + f"Forecast horizon: {ttm311['days_horizon']} days " + f"({ttm311['days_horizon'] // 7} weeks) ahead.", + f"Forecast rate: {ttm311['forecast_mean_per_day']} complaints/day " + f"(≈{ttm311['forecast_weekly_equivalent']} per week).", + f"Forecast peak day: {ttm311['forecast_peak_day']} complaints, " + f"day +{ttm311['forecast_peak_day_offset']}.", + f"Acceleration cue: {accel}.", + "INTERPRETATION: this is a per-address pattern forecast, not " + "a city-wide trend. Zero-history addresses get a zero-baseline " + "forecast (legitimate); the more relevant cite is when there's " + "a multi-month complaint history that the model is extrapolating.", + ] + docs.append(_doc_message("ttm_311_forecast", body)) + + # FloodNet sensor forecast — TTM r2 on the nearest sensor's + # historical flood-event recurrence. Reuses the (512, 96) + # singleton from ttm_311_forecast — same model class, different + # data stream. Doc id includes the sensor deployment id so the + # citation is unambiguous when multiple sensors are nearby. 
+    fnf = state.get("floodnet_forecast")
+    if not out_of_nyc and fnf and fnf.get("available"):
+        accel = ("YES — next-28-day forecast > 50% above prior-28-day "
+                 "observed count"
+                 if fnf.get("accelerating")
+                 else "no — forecast in line with recent baseline")
+        doc_id = fnf.get("doc_id") or "floodnet_forecast"
+        body = [
+            "Source: FloodNet NYC ultrasonic depth sensor network "
+            "(api.floodnet.nyc) historical flood events, forecast by "
+            "IBM Granite TimeSeries TTM r2 (Ekambaram et al. 2024, "
+            "NeurIPS).",
+            f"Sensor: {fnf['sensor_name']} (deployment "
+            f"{fnf['sensor_id']}) at {fnf['sensor_street']}, "
+            f"{fnf['sensor_borough']}.",
+            f"Distance from query: {fnf['distance_from_query_m']} m.",
+            f"History window: {fnf['history_window_days']} days; "
+            f"{fnf['history_total_events']} flood events observed total, "
+            f"{fnf['history_recent_28d_events']} in the most recent "
+            f"28 days.",
+            f"Forecast horizon: {fnf['forecast_horizon_days']} days.",
+            f"Forecast next-28-day expected events: "
+            f"{fnf['forecast_28d_expected_events']}.",
+            f"Forecast peak day offset: +{fnf['forecast_peak_day_offset']} "
+            f"(value {fnf['forecast_peak_day_value']}).",
+            f"Acceleration cue: {accel}.",
+            "INTERPRETATION: this is a per-sensor recurrence forecast — "
+            "expected count of labelled flood events at that specific "
+            "deployment over the horizon, not an above-curb-event "
+            "probability. CUSP/Brooklyn College operates the sensors and "
+            "publishes the historical events; this forecast is Riprap's "
+            "extension to the same dataset, computable per-query.",
+        ]
+        docs.append(_doc_message(doc_id, body))
+
+    # Granite TTM r2 — Battery surge fine-tune (msradam/Granite-TTM-r2-
+    # Battery-Surge, Apache-2.0, fine-tuned on AMD MI300X). Hourly
+    # cadence, 96 h horizon — distinct from the existing zero-shot
+    # ttm_forecast above, which runs at 6-min cadence over a 9.6 h
+    # horizon. Both can fire on the same query.
+    tbs = state.get("ttm_battery_surge")
+    if (not out_of_nyc and tbs and tbs.get("available")
+            and tbs.get("interesting")):
+        body = [
+            "Source: msradam/Granite-TTM-r2-Battery-Surge (Apache-2.0). "
+            "Fine-tune of ibm-granite/granite-timeseries-ttm-r2 trained "
+            "on AMD Instinct MI300X via AMD Developer Cloud. Test MAE "
+            "0.1091 m, -41% vs persistence and -25% vs zero-shot TTM r2.",
+            f"Gauge: {tbs['station_name']} (NOAA {tbs['station_id']}).",
+            f"Context window: {tbs['context_hours']} hours "
+            f"(~{tbs['context_hours']/24:.1f} days) of hourly surge "
+            "residual (verified water level minus harmonic tide).",
+            f"Forecast horizon: {tbs['horizon_hours']} hours "
+            f"(~{tbs['horizon_hours']/24:.1f} days ahead).",
+            f"Recent residual: {tbs['history_recent_m']} m.",
+            f"Recent peak |residual| in context: "
+            f"{tbs['history_peak_abs_m']} m.",
+            f"Forecast peak surge residual: {tbs['forecast_peak_m']} m, "
+            f"expected {tbs['forecast_peak_hours_ahead']} hours from "
+            f"now (at {tbs['forecast_peak_time_utc']} UTC).",
+            "INTERPRETATION: positive residual is the meteorological "
+            "component (storm surge, atmospheric pressure, wind setup) "
+            "on top of astronomical tide. The Battery is the dominant "
+            "NYC harbor-entrance gauge — its surge characterises Sandy "
+            "and Ida conditions citywide.",
+        ]
+        docs.append(_doc_message("ttm_battery", body))
+
+    # NPCC4 sea-level rise projection — static table, always emits for
+    # NYC addresses. Provides the policy/planning horizon context that
+    # grounds the "what's coming" section.
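+    # Expected payload shape, per the reads below:
+    #   {"available": True,
+    #    "2050": {"10": {"in": ..., "m": ...}, "50": {...},
+    #             "90": {...}, "99": {...}},
+    #    "2100": {... same shape ...}}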
+ slr = state.get("npcc4_slr") + if not out_of_nyc and slr and slr.get("available"): + y2050 = slr["2050"] + y2100 = slr["2100"] + body = [ + f"Source: {npcc4_slr.CITATION}", + "Sea-level rise projections for the Battery Tide Gauge " + "(primary NYC harbor reference), inches above 2000–2004 baseline:", + f"2050 — Low (10th pct): {y2050['10']['in']} in " + f"({y2050['10']['m']} m); " + f"Mid (50th): {y2050['50']['in']} in ({y2050['50']['m']} m); " + f"High (90th): {y2050['90']['in']} in ({y2050['90']['m']} m); " + f"Extreme (99th): {y2050['99']['in']} in ({y2050['99']['m']} m).", + f"2100 — Low (10th pct): {y2100['10']['in']} in " + f"({y2100['10']['m']} m); " + f"Mid (50th): {y2100['50']['in']} in ({y2100['50']['m']} m); " + f"High (90th): {y2100['90']['in']} in ({y2100['90']['m']} m); " + f"Extreme (99th): {y2100['99']['in']} in ({y2100['99']['m']} m).", + "INTERPRETATION: these are harbor-wide projections, not " + "site-specific inundation depths. Local exposure depends on " + "elevation, distance to waterfront, and storm-surge coupling. " + "Use the DEP stormwater and Sandy layers for site-specific " + "flood-zone assignment.", + ] + docs.append(_doc_message("npcc4_slr", body)) + + # ---- Policy context (RAG + GLiNER, ancillary to the four Stones) --- + # Retrieved policy paragraphs and GLiNER typed-entity extractions. + # These don't belong to a specific Stone — they ground the + # briefing's "Policy context" section. + rag_hits = [] if out_of_nyc else (state.get("rag") or []) + for h in rag_hits: + body = [ + f"Source: {h['citation']}, page {h['page']}.", + f"Retrieved passage (verbatim): {h['text']}", + ] + docs.append(_doc_message(h["doc_id"], body)) + + # Per-source structured fields the reconciler can cite as + # [gliner_] in addition to the parent [rag_]. + gliner = (state.get("gliner") or {}) + if not out_of_nyc and gliner: + for source, payload in gliner.items(): + ents = payload.get("entities") or [] + if not ents: + continue + body = [ + f"Source PDF (parent retriever doc_id: {payload.get('rag_doc_id', '?')}, " + f"title: {payload.get('title', '?')}).", + f"Paragraph excerpt: \"{payload.get('paragraph_excerpt', '')}\"", + "Typed entities extracted by GLiNER (verbatim spans):", + ] + for e in ents: + body.append( + f" - [{e['label']}] {e['text']} (score={e.get('score', 0):.2f})" + ) + docs.append(_doc_message(f"gliner_{source}", body)) + + return docs + + +def reconcile(state: dict[str, Any], model: str = OLLAMA_MODEL, + return_audit: bool = False, on_token=None): + """Run Granite reconciliation, then drop sentences with ungrounded numbers. + + If on_token is provided, the model is run in streaming mode and + on_token(delta) is called for each chunk as Granite generates. + + If return_audit=True, returns (paragraph, audit_dict) where audit_dict + has 'raw' (Granite's original output) and 'dropped' (list of dropped + sentences with their ungrounded numeric tokens). + """ + doc_msgs = build_documents(state) + if not doc_msgs: + msg = "No grounded data available for this address." + return (msg, {"raw": msg, "dropped": []}) if return_audit else msg + + messages = doc_msgs + [ + {"role": "system", "content": EXTRA_SYSTEM_PROMPT}, + {"role": "user", "content": "Write the cited paragraph now."}, + ] + # single_address: 13 specialists may fire, doc bodies are short. + # num_ctx 4096 covers ~700 system + ~2500 docs. num_predict 400 caps + # the 4-section briefing at ~300-350 tokens. 
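+    # Rough budget check: ~700 system-prompt tokens + 13 docs × ~190
+    # tokens ≈ 2,500 doc tokens + 400 generated ≈ 3,600 total, inside
+    # num_ctx=4096 with a little headroom.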
+ OPTS = {"temperature": 0, "num_ctx": 4096, "num_predict": 400} + if on_token is None: + resp = llm.chat(model=model, messages=messages, options=OPTS) + raw = resp["message"]["content"].strip() + else: + chunks: list[str] = [] + for chunk in llm.chat(model=model, messages=messages, stream=True, + options=OPTS): + delta = (chunk.get("message") or {}).get("content") or "" + if delta: + chunks.append(delta) + on_token(delta) + raw = "".join(chunks).strip() + + cleaned, dropped = verify_paragraph(raw, doc_msgs) + if return_audit: + return cleaned, {"raw": raw, "dropped": dropped} + return cleaned diff --git a/app/register_builder.py b/app/register_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..4dd234d04c768add97de2d90fc0f8fc4165eb816 --- /dev/null +++ b/app/register_builder.py @@ -0,0 +1,170 @@ +"""Generic per-asset register builder. + +Runs the same FSM specialists over every asset in a class. Tier 1+2 +get full Granite paragraphs; Tier 3 gets signals only (paragraph +generated on click in the UI). +""" +from __future__ import annotations + +import json +import sys +import time +from collections.abc import Callable +from pathlib import Path +from typing import Any + +import geopandas as gpd + +from app.context import floodnet, microtopo, nyc311 +from app.flood_layers import dep_stormwater, ida_hwm, sandy_inundation +from app.rag import retrieve as rag_retrieve +from app.rag import warm as rag_warm +from app.reconcile import reconcile as run_reconcile +from app.score import score_frame + +ROOT = Path(__file__).resolve().parent.parent +REGISTERS_DIR = ROOT / "data" / "registers" + + +def _build_one(row_meta: dict, geom_2263, lat: float, lon: float, + with_paragraph: bool) -> dict: + pt = gpd.GeoDataFrame(geometry=[geom_2263], crs="EPSG:2263") + sandy = bool(sandy_inundation.join(pt).iloc[0]) + dep = {} + for scen in ["dep_extreme_2080", "dep_moderate_2050", "dep_moderate_current"]: + j = dep_stormwater.join(pt, scen).iloc[0] + dep[scen] = { + "depth_class": int(j["depth_class"]), + "depth_label": j["depth_label"], + "citation": f"NYC DEP Stormwater Flood Map — {dep_stormwater.label(scen)}", + } + fn = floodnet.summary_for_point(lat, lon, 600); fn["radius_m"] = 600 + n311 = nyc311.summary_for_point(lat, lon, 200, 5) + mt_obj = microtopo.microtopo_at(lat, lon) + mt = vars(mt_obj) if mt_obj else None + ida_obj = ida_hwm.summary_for_point(lat, lon, 800) + ida = vars(ida_obj) if ida_obj else None + + snap = { + "geocode": {**row_meta, "lat": lat, "lon": lon}, + "sandy": sandy, "dep": dep, "floodnet": fn, "nyc311": n311, + "microtopo": mt, "ida_hwm": ida, + } + if with_paragraph: + rag_query = (f"flood risk for {row_meta.get('name','')} in " + f"{row_meta.get('borough','')}, NYC; resilience plan, " + f"vulnerability, mitigation") + snap["rag"] = rag_retrieve(rag_query, k=2, min_score=0.55) + para, audit = run_reconcile(snap, return_audit=True) + snap["paragraph"] = para + snap["audit"] = audit + return snap + + +def build_register(asset_class: str, loader: Callable, *, + tier_with_paragraph: tuple[int, ...] = (1, 2), + meta_keys: tuple[str, ...] = ("name", "address", "borough"), + regenerate: bool = False) -> Path: + """Build a register JSON for an asset class. 
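+
+    A typical bake-script call looks like (loader name hypothetical):
+
+        build_register("schools", load_doe_school_points,
+                       tier_with_paragraph=(1, 2), regenerate=True)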
+
+    Args:
+        asset_class: short id (also the output filename)
+        loader: zero-arg callable returning a GeoDataFrame in EPSG:2263 with
+            point geometry and at least the columns in meta_keys
+        tier_with_paragraph: which tiers get full Granite reconciliation
+        meta_keys: which row columns to surface as the geocode-style metadata
+        regenerate: rebuild even if the output JSON already exists
+    """
+    out = REGISTERS_DIR / f"{asset_class}.json"
+    if out.exists() and not regenerate:
+        print(f"already exists: {out}; pass regenerate=True to rebuild",
+              file=sys.stderr)
+        return out
+    REGISTERS_DIR.mkdir(exist_ok=True, parents=True)
+
+    print(f"loading asset class {asset_class!r}...", file=sys.stderr)
+    g = loader()
+    # set_crs (not to_crs) when the frame is CRS-naive: to_crs raises on
+    # naive geometry, and the loader contract says coordinates are 2263.
+    if g.crs is None:
+        g = g.set_crs("EPSG:2263")
+    elif g.crs.to_string() != "EPSG:2263":
+        g = g.to_crs("EPSG:2263")
+
+    # tier each asset off the same rubric (sandy + 3 DEP scenarios)
+    g["sandy"] = sandy_inundation.join(g).astype(int)
+    for scen in ["dep_extreme_2080", "dep_moderate_2050", "dep_moderate_current"]:
+        j = dep_stormwater.join(g, scen)
+        g[scen] = (j["depth_class"] > 0).astype(int)
+    g = score_frame(g)
+    g["lat"] = g.geometry.to_crs("EPSG:4326").y
+    g["lon"] = g.geometry.to_crs("EPSG:4326").x
+
+    targets = g[g["tier"].isin([1, 2, 3])].copy()
+    print(f"  {len(targets)} of {len(g)} assets at Tier 1-3", file=sys.stderr)
+    print("warming RAG index...", file=sys.stderr)
+    rag_warm()
+
+    # Resume support: a partial JSON sits next to the final output. We
+    # write it after every row, so any blip can be retried without losing
+    # work.
+    partial = REGISTERS_DIR / f"{asset_class}.partial.json"
+    rows: list[dict] = []
+    done_keys: set = set()
+    if partial.exists():
+        try:
+            data = json.loads(partial.read_text())
+            rows = data.get("rows", [])
+            # use lat/lon as the unique key (works for any asset class)
+            done_keys = {(round(r["lat"], 5), round(r["lon"], 5)) for r in rows}
+            print(f"  resuming with {len(rows)} rows already processed",
+                  file=sys.stderr)
+        except Exception as e:
+            print(f"  failed to read partial, starting fresh: {e}",
+                  file=sys.stderr)
+
+    t0 = time.time()
+    for i, (_, row) in enumerate(
+            targets.sort_values(["score", "name"], ascending=[False, True]).iterrows()):
+        key = (round(float(row["lat"]), 5), round(float(row["lon"]), 5))
+        if key in done_keys:
+            continue
+        tier = int(row["tier"])
+        with_paragraph = tier in tier_with_paragraph
+        meta = {k: row.get(k) for k in meta_keys}
+        try:
+            snap = _build_one(meta, row["geometry"],
+                              float(row["lat"]), float(row["lon"]),
+                              with_paragraph=with_paragraph)
+        except Exception as e:
+            print(f"  [{i+1}/{len(targets)}] FAILED tier-{tier} "
+                  f"{str(meta.get('name',''))[:50]} -- {type(e).__name__}: {e}",
+                  file=sys.stderr)
+            time.sleep(2)  # back off on transient errors
+            continue
+
+        rec: dict[str, Any] = {
+            **{k: row.get(k) for k in g.columns if k != "geometry"},
+            "lat": float(row["lat"]),
+            "lon": float(row["lon"]),
+            "score": int(row["score"]),
+            "tier": tier,
+            "snap": snap,
+        }
+        rows.append(rec)
+        done_keys.add(key)
+        # incremental persist
+        partial.write_text(json.dumps({
+            "asset_class": asset_class,
+            "rows": rows,
+        }, default=str))
+        elapsed = time.time() - t0
+        print(f"  [{i+1}/{len(targets)}] tier-{tier} "
+              f"{str(meta.get('name',''))[:50]:<50} "
+              f"({elapsed:.0f}s elapsed)", file=sys.stderr)
+
+    out.write_text(json.dumps({
+        "asset_class": asset_class,
+        # gmtime, not the default localtime: the trailing Z claims UTC.
+        "generated_at": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
+        "rows": rows,
+    }, default=str))
+    if partial.exists():
+        partial.unlink()
+    print(f"\nwrote {len(rows)} rows -> {out} ({out.stat().st_size // 1024} KB)",
+          file=sys.stderr)
+    return out
diff --git
a/app/registers/__init__.py b/app/registers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/app/registers/_footprint.py b/app/registers/_footprint.py new file mode 100644 index 0000000000000000000000000000000000000000..9a7c51eecf6a2d0341414522a732fc7dd73fdec2 --- /dev/null +++ b/app/registers/_footprint.py @@ -0,0 +1,84 @@ +"""Buffered point-overlap helpers for the register specialists. + +The four register specialists (MTA entrances, NYCHA developments, +DOE schools, NYS DOH hospitals) all need to test whether an asset +intersects a flood polygon. NYCHA developments are already polygons +(real building-group footprints), so polygon-vs-polygon `intersects` +is correct. The other three are stored as point centroids: + +- MTA entrances are physical entrances; the point is the centerline +- DOE schools are address centroids (administrative point), but the + actual building extends ~50 m around it +- NYS DOH hospitals are address centroids; campuses are 80–250 m wide + +Pure point-in-polygon on the centroid produces false negatives at +the boundary: NYU Langone, Stuyvesant HS, P.S. 89 all sit on +buildings whose footprints overlap the 2012 Sandy zone but whose +recorded centroid points just miss it. + +The honest fix is a join against the actual NYC Building Footprints ++ PLUTO BBL → footprint dataset (~400 MB). That's a separate +ingestion task. This module is the surgical-and-shippable +intermediate fix: buffer the centroid by an asset-class-appropriate +radius, then ask `intersects` against the same Sandy / DEP polygons +the existing helpers use. The `footprint_buffer_m` is recorded in +the specialist output so the trace UI shows what radius was used — +auditability over hidden assumptions. +""" +from __future__ import annotations + +import logging + +log = logging.getLogger("riprap.register.footprint") + +# Per-asset-class footprint buffer (metres). Conservative enough to +# catch known canonical false-negatives (NYU Langone, Stuyvesant HS, +# P.S. 89) without sweeping in obviously-distant buildings. +BUFFER_MTA_ENTRANCE_M = 8 +BUFFER_DOE_SCHOOL_M = 50 +BUFFER_DOH_HOSPITAL_M = 100 + + +def inside_sandy_buffered(lat: float, lon: float, buffer_m: float) -> bool: + """True if the buffer of (lat, lon) by buffer_m metres intersects + the 2012 Sandy Inundation Zone.""" + try: + import geopandas as gpd + from shapely.geometry import Point + + from app.flood_layers import sandy_inundation + # Project before buffering so the buffer is metric. EPSG:2263 + # is NYC State Plane (feet) — convert metres to feet for buffer. + ft = buffer_m * 3.280839895 + pt = gpd.GeoDataFrame( + geometry=[Point(lon, lat)], crs="EPSG:4326" + ).to_crs("EPSG:2263") + pt["geometry"] = pt.geometry.buffer(ft) + return bool(sandy_inundation.join(pt).iloc[0]) + except Exception: + log.exception("buffered sandy join failed") + return False + + +def dep_class_buffered(lat: float, lon: float, buffer_m: float, + scenario: str) -> tuple[int | None, str | None]: + """Max DEP depth class within `buffer_m` of (lat, lon). + + Returns (depth_class, depth_label). Higher class wins on overlap, + matching `dep_stormwater.join`'s semantics. None on failure. 
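+
+    Illustrative call (coordinates hypothetical):
+
+        cls, label = dep_class_buffered(40.74, -73.99,
+                                        BUFFER_DOE_SCHOOL_M,
+                                        "dep_moderate_2050")
+
+    returning e.g. (2, "Deep & Contiguous (1-4 ft)") when the 50 m buffer
+    overlaps a class-2 cell (labels per the register modules' depth table).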
+ """ + try: + import geopandas as gpd + from shapely.geometry import Point + + from app.flood_layers import dep_stormwater + ft = buffer_m * 3.280839895 + pt = gpd.GeoDataFrame( + geometry=[Point(lon, lat)], crs="EPSG:4326" + ).to_crs("EPSG:2263") + pt["geometry"] = pt.geometry.buffer(ft) + j = dep_stormwater.join(pt, scenario).iloc[0] + return int(j["depth_class"]), str(j["depth_label"]) + except Exception: + log.exception("buffered dep join failed for %s", scenario) + return None, None diff --git a/app/registers/_loader.py b/app/registers/_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..8e8d3520346f61ccf9aa7289c845e3276282b2ab --- /dev/null +++ b/app/registers/_loader.py @@ -0,0 +1,69 @@ +"""Shared loader for pre-built register JSONs in data/registers/. + +Each register specialist (`nycha`, `doe_schools`, `doh_hospitals`, +`mta_entrances`) has a pre-computed JSON catalog of every Tier 1-3 +exposed asset. The catalog is built once by scripts/build_*_register.py +running the full polygon-overlap math; per-query specialists used to +recompute that math against multi-million-polygon GDB layers, which +on the HF Space CPU made `step_nycha` hang for minutes. + +This module provides O(1) cached load + haversine-on-prebuilt-rows +nearest-N retrieval. Per-query latency drops from minutes to ~ms +without losing the exposure semantics — the per-asset flags +(snap.sandy, snap.dep[scen].depth_class, snap.microtopo) were already +computed during the bake. + +Asset classes outside this catalog (truly unexposed assets, tier 0) +are intentionally not surfaced: a Carleton Manor query that returns +"no NYCHA developments at risk within 1 mi" is a more useful +result than "we found 5 inland NYCHA developments with 0% Sandy +overlap." +""" +from __future__ import annotations + +import json +import math +from functools import lru_cache +from pathlib import Path + +REGISTERS_DIR = Path(__file__).resolve().parents[2] / "data" / "registers" + + +@lru_cache(maxsize=8) +def load_register(asset_class: str) -> list[dict]: + """Return the rows list from data/registers/.json. The + caller treats each row as opaque except for the lat/lon fields.""" + p = REGISTERS_DIR / f"{asset_class}.json" + if not p.exists(): + return [] + with open(p) as f: + d = json.load(f) + return list(d.get("rows", [])) + + +def haversine_m(lat1: float, lon1: float, lat2: float, lon2: float) -> float: + R = 6371000.0 + p1, p2 = math.radians(lat1), math.radians(lat2) + dp = math.radians(lat2 - lat1); dl = math.radians(lon2 - lon1) + a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2 + return 2 * R * math.asin(math.sqrt(a)) + + +def nearest_n(asset_class: str, lat: float, lon: float, + radius_m: float, n: int) -> list[tuple[float, dict]]: + """Return up to N rows within radius_m of (lat, lon), sorted by + distance ascending. 
Each entry is (distance_m, row).""" + rows = load_register(asset_class) + if not rows: + return [] + candidates: list[tuple[float, dict]] = [] + for r in rows: + rlat = r.get("lat") + rlon = r.get("lon") + if rlat is None or rlon is None: + continue + d = haversine_m(lat, lon, float(rlat), float(rlon)) + if d <= radius_m: + candidates.append((d, r)) + candidates.sort(key=lambda t: t[0]) + return candidates[:n] diff --git a/app/registers/doe_schools.py b/app/registers/doe_schools.py new file mode 100644 index 0000000000000000000000000000000000000000..41eb7a6f4b687d7bde9402e87975e978da7843a3 --- /dev/null +++ b/app/registers/doe_schools.py @@ -0,0 +1,212 @@ +"""doe_school_exposure — flood-exposure briefing per NYC public school. + +Point-based register specialist (1992 NYC DOE school points). Same +join pattern as the MTA-entrance specialist. Per queried (lat, lon), +returns up to N schools within `radius_m`, enriched with: + + - inside_sandy_2012 (point-in-polygon, empirical) + - dep_extreme_2080_class (point-in-polygon, modeled) + - dep_moderate_2050_class (point-in-polygon, modeled) + - elevation_m (USGS 3DEP DEM, proxy) + - hand_m (derived HAND raster, proxy) + +doc_id format: `doe_school_`. Schools are physical +buildings that serve as evacuation hubs in city OEM plans, so +"this school sits inside the 2012 Sandy zone" is a structural +claim that's directly relevant to flood planning. +""" + +from __future__ import annotations + +import json +import logging +import math +import sys +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +from app.registers._footprint import ( + BUFFER_DOE_SCHOOL_M, + dep_class_buffered, + inside_sandy_buffered, +) + +_ROOT = Path(__file__).resolve().parents[2] +if str(_ROOT) not in sys.path: + sys.path.insert(0, str(_ROOT)) + +log = logging.getLogger("riprap.doe_school") + +DATA = _ROOT / "data" +SCHOOLS = DATA / "schools.geojson" + +DEFAULT_RADIUS_M = 1500 +DEFAULT_MAX_PER_QUERY = 6 + +BORO_NAME = {"1": "MANHATTAN", "2": "BRONX", "3": "BROOKLYN", + "4": "QUEENS", "5": "STATEN ISLAND"} + +MANAGED_BY_LABEL = {"1": "DOE-managed", "2": "Charter or other"} + + +@dataclass +class SchoolFinding: + loc_code: str + loc_name: str + address: str + borough: str + bin: str + bbl: str + managed_by: str + school_lat: float + school_lon: float + distance_m: float + elevation_m: float | None + hand_m: float | None + inside_sandy_2012: bool + dep_extreme_2080_class: int | None + dep_extreme_2080_label: str | None + dep_moderate_2050_class: int | None + dep_moderate_2050_label: str | None + + +def _haversine_m(lat1, lon1, lat2, lon2) -> float: + R = 6371000.0 + p1, p2 = math.radians(lat1), math.radians(lat2) + dp = math.radians(lat2 - lat1); dl = math.radians(lon2 - lon1) + a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2 + return 2 * R * math.asin(math.sqrt(a)) + + +@lru_cache(maxsize=1) +def _load_schools(): + import geopandas as gpd + gdf = gpd.read_file(SCHOOLS) + gdf["lat"] = gdf.geometry.y + gdf["lon"] = gdf.geometry.x + return gdf.reset_index(drop=True) + + +def _schools_near(lat: float, lon: float, radius_m: float): + gdf = _load_schools() + deg = radius_m / 90_000 + sub = gdf[(gdf["lat"].between(lat - deg, lat + deg)) + & (gdf["lon"].between(lon - deg, lon + deg))].copy() + if sub.empty: + return sub + sub["distance_m"] = sub.apply( + lambda r: _haversine_m(lat, lon, r["lat"], r["lon"]), axis=1) + return sub[sub["distance_m"] <= radius_m].sort_values("distance_m") + + +def 
_sample_raster(raster_path: Path, lat: float, lon: float) -> float | None: + if not raster_path.exists(): + return None + try: + import rasterio + with rasterio.open(raster_path) as src: + v = next(src.sample([(lon, lat)]))[0] + v = float(v) + if math.isnan(v) or v == src.nodata: + return None + return v + except Exception: + log.exception("raster sample failed for %s", raster_path) + return None + + +def _inside_sandy(lat: float, lon: float) -> bool: + return inside_sandy_buffered(lat, lon, BUFFER_DOE_SCHOOL_M) + + +def _dep_class(lat: float, lon: float, scenario: str): + return dep_class_buffered(lat, lon, BUFFER_DOE_SCHOOL_M, scenario) + + +def summary_for_point(lat: float, lon: float, + radius_m: float = DEFAULT_RADIUS_M, + max_schools: int = DEFAULT_MAX_PER_QUERY) -> dict: + """N nearest tier-1-3 DOE schools to (lat, lon), with pre-computed + exposure flags read from data/registers/schools.json. The bake + script runs the buffered point-in-polygon math citywide once; + per-query work is haversine + dict lookup.""" + from app.registers._loader import nearest_n + hits = nearest_n("schools", lat, lon, radius_m, max_schools) + if not hits: + return {"available": False, + "n_schools": 0, + "radius_m": radius_m, + "schools": []} + + findings: list[SchoolFinding] = [] + for distance_m, row in hits: + snap = row.get("snap") or {} + dep = snap.get("dep") or {} + microtopo = snap.get("microtopo") or {} + + def _depth(scen: str) -> tuple[int | None, str | None]: + d = dep.get(scen) or {} + cls = d.get("depth_class") + lbl = d.get("depth_label") + return (int(cls) if cls is not None else None, + str(lbl) if lbl else None) + + d80c, d80l = _depth("dep_extreme_2080") + d50c, d50l = _depth("dep_moderate_2050") + elev = microtopo.get("point_elev_m") + hand = microtopo.get("aoi_hand_m") or microtopo.get("hand_m") + + findings.append(SchoolFinding( + loc_code=str(row.get("loc_code", "")), + loc_name=str(row.get("name", "")), + address=str(row.get("address", "")).strip(), + borough=str(row.get("borough", "")), + bin=str(row.get("bin", "")), + bbl=str(row.get("bbl", "")), + managed_by="DOE-managed", + school_lat=round(float(row["lat"]), 5), + school_lon=round(float(row["lon"]), 5), + distance_m=round(distance_m, 1), + elevation_m=round(float(elev), 2) if elev is not None else None, + hand_m=round(float(hand), 2) if hand is not None else None, + inside_sandy_2012=bool(snap.get("sandy")), + dep_extreme_2080_class=d80c, + dep_extreme_2080_label=d80l, + dep_moderate_2050_class=d50c, + dep_moderate_2050_label=d50l, + )) + + n_in_sandy = sum(1 for f in findings if f.inside_sandy_2012) + n_dep_2080 = sum(1 for f in findings + if (f.dep_extreme_2080_class or 0) > 0) + return { + "available": True, + "n_schools": len(findings), + "radius_m": radius_m, + "footprint_buffer_m": BUFFER_DOE_SCHOOL_M, + "n_inside_sandy_2012": n_in_sandy, + "n_in_dep_extreme_2080": n_dep_2080, + "schools": [vars(f) for f in findings], + "citation": ("Pre-computed from NYC DOE Locations Points joined " + "to Sandy 2012 Inundation Zone (5xsi-dfpx) + " + "NYC DEP Stormwater Flood Maps + USGS 3DEP DEM. 
" + "See data/registers/schools.json."), + } + + +def main() -> int: + import argparse + ap = argparse.ArgumentParser() + ap.add_argument("--lat", type=float, required=True) + ap.add_argument("--lon", type=float, required=True) + ap.add_argument("--radius", type=float, default=DEFAULT_RADIUS_M) + ap.add_argument("--max", type=int, default=DEFAULT_MAX_PER_QUERY) + args = ap.parse_args() + s = summary_for_point(args.lat, args.lon, args.radius, args.max) + print(json.dumps(s, indent=2, default=str)) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/app/registers/doh_hospitals.py b/app/registers/doh_hospitals.py new file mode 100644 index 0000000000000000000000000000000000000000..53c5901d83fc3d854cfd30d1bdc380f297100c52 --- /dev/null +++ b/app/registers/doh_hospitals.py @@ -0,0 +1,233 @@ +"""nys_doh_hospital_exposure — flood-exposure briefing per NYC hospital. + +Point-based register specialist on 67 NYC hospitals from the NYS DOH +Health Facility Certification Information dataset (Article 28 +hospitals only, filtered to the 5 NYC counties). Same join pattern +as MTA entrances and DOE schools. + +Hospitals are essential infrastructure: a hospital inside the 2012 +Sandy Inundation Zone tells planners and emergency-management +audiences something concrete about lifeline-asset exposure. NYU +Langone, Bellevue, and Coney Island Hospital all evacuated patients +during Sandy — those events are public-record and well-documented. + +doc_id format: `nyc_hospital_` (NYS DOH facility ID). +""" + +from __future__ import annotations + +import json +import logging +import math +import sys +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +from app.registers._footprint import ( + BUFFER_DOH_HOSPITAL_M, + dep_class_buffered, + inside_sandy_buffered, +) + +_ROOT = Path(__file__).resolve().parents[2] +if str(_ROOT) not in sys.path: + sys.path.insert(0, str(_ROOT)) + +log = logging.getLogger("riprap.hospital") + +DATA = _ROOT / "data" +HOSPITALS = DATA / "hospitals.geojson" + +DEFAULT_RADIUS_M = 3000 # hospitals are sparse; wider radius +DEFAULT_MAX_PER_QUERY = 5 + +COUNTY_TO_BOROUGH = { + "New York": "MANHATTAN", "Kings": "BROOKLYN", "Bronx": "BRONX", + "Queens": "QUEENS", "Richmond": "STATEN ISLAND", +} + + +@dataclass +class HospitalFinding: + fac_id: str + facility_name: str + address: str + borough: str + operator_name: str + ownership_type: str + hospital_lat: float + hospital_lon: float + distance_m: float + elevation_m: float | None + hand_m: float | None + inside_sandy_2012: bool + dep_extreme_2080_class: int | None + dep_extreme_2080_label: str | None + dep_moderate_2050_class: int | None + dep_moderate_2050_label: str | None + + +def _haversine_m(lat1, lon1, lat2, lon2) -> float: + R = 6371000.0 + p1, p2 = math.radians(lat1), math.radians(lat2) + dp = math.radians(lat2 - lat1); dl = math.radians(lon2 - lon1) + a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2 + return 2 * R * math.asin(math.sqrt(a)) + + +@lru_cache(maxsize=1) +def _load_hospitals(): + import geopandas as gpd + gdf = gpd.read_file(HOSPITALS) + gdf["lat"] = gdf.geometry.y + gdf["lon"] = gdf.geometry.x + return gdf.reset_index(drop=True) + + +def _hospitals_near(lat: float, lon: float, radius_m: float): + gdf = _load_hospitals() + deg = radius_m / 90_000 + sub = gdf[(gdf["lat"].between(lat - deg, lat + deg)) + & (gdf["lon"].between(lon - deg, lon + deg))].copy() + if sub.empty: + return sub + sub["distance_m"] = sub.apply( + lambda r: 
_haversine_m(lat, lon, r["lat"], r["lon"]), axis=1) + return sub[sub["distance_m"] <= radius_m].sort_values("distance_m") + + +def _sample_raster(raster_path: Path, lat: float, lon: float) -> float | None: + if not raster_path.exists(): + return None + try: + import rasterio + with rasterio.open(raster_path) as src: + v = next(src.sample([(lon, lat)]))[0] + v = float(v) + if math.isnan(v) or v == src.nodata: + return None + return v + except Exception: + log.exception("raster sample failed for %s", raster_path) + return None + + +def _inside_sandy(lat: float, lon: float) -> bool: + return inside_sandy_buffered(lat, lon, BUFFER_DOH_HOSPITAL_M) + + +def _dep_class(lat: float, lon: float, scenario: str): + return dep_class_buffered(lat, lon, BUFFER_DOH_HOSPITAL_M, scenario) + + +_DEPTH_LABEL = { + 0: "outside", + 1: "Nuisance (>4 in to 1 ft)", + 2: "Deep & Contiguous (1-4 ft)", + 3: "Deep Contiguous (>4 ft)", +} + + +def _exposure_at(lat: float, lon: float) -> tuple[bool, dict]: + """Use baked Cornerstone rasters for fast per-point exposure lookup. + Returns (inside_sandy, {scen: (depth_class, depth_label)}). Falls + back to the legacy buffered GDB join if rasters absent.""" + try: + import geopandas as gpd + from shapely.geometry import Point + + from app.flood_layers import dep_stormwater, sandy_inundation + pt = (gpd.GeoDataFrame(geometry=[Point(lon, lat)], crs="EPSG:4326") + .to_crs("EPSG:2263").iloc[0].geometry) + in_sandy = sandy_inundation.inside_raster(pt) + deps = {} + for scen in ("dep_extreme_2080", "dep_moderate_2050"): + cls = dep_stormwater.join_raster(pt, scen) + deps[scen] = (cls, _DEPTH_LABEL.get(cls, "outside")) + return in_sandy, deps + except Exception: + log.exception("raster exposure lookup failed; falling back") + in_sandy = _inside_sandy(lat, lon) + d80c, d80l = _dep_class(lat, lon, "dep_extreme_2080") + d50c, d50l = _dep_class(lat, lon, "dep_moderate_2050") + return in_sandy, { + "dep_extreme_2080": (d80c, d80l), + "dep_moderate_2050": (d50c, d50l), + } + + +def summary_for_point(lat: float, lon: float, + radius_m: float = DEFAULT_RADIUS_M, + max_hospitals: int = DEFAULT_MAX_PER_QUERY) -> dict: + """N nearest hospitals to (lat, lon), with exposure flags computed + via Cornerstone baked rasters. Hospitals have no pre-built register + (small enough at ~150 entries to not need one), so we read the + full GeoJSON and sample the rasters per-hit. 
Sub-ms per query.""" + near = _hospitals_near(lat, lon, radius_m) + if near.empty: + return {"available": False, + "n_hospitals": 0, + "radius_m": radius_m, + "hospitals": []} + + near = near.head(max_hospitals) + findings: list[HospitalFinding] = [] + for _, row in near.iterrows(): + hlat, hlon = float(row["lat"]), float(row["lon"]) + elev = _sample_raster(DATA / "nyc_dem_30m.tif", hlat, hlon) + hand = _sample_raster(DATA / "hand.tif", hlat, hlon) + in_sandy, deps = _exposure_at(hlat, hlon) + d80c, d80l = deps["dep_extreme_2080"] + d50c, d50l = deps["dep_moderate_2050"] + findings.append(HospitalFinding( + fac_id=str(row["fac_id"]), + facility_name=str(row["facility_name"]), + address=f"{row['address1']}, {row['city']}".strip(", "), + borough=COUNTY_TO_BOROUGH.get(str(row["county"]), str(row["county"])), + operator_name=str(row["operator_name"]), + ownership_type=str(row["ownership_type"]), + hospital_lat=round(hlat, 5), + hospital_lon=round(hlon, 5), + distance_m=round(float(row["distance_m"]), 1), + elevation_m=round(elev, 2) if elev is not None else None, + hand_m=round(hand, 2) if hand is not None else None, + inside_sandy_2012=in_sandy, + dep_extreme_2080_class=d80c, + dep_extreme_2080_label=d80l, + dep_moderate_2050_class=d50c, + dep_moderate_2050_label=d50l, + )) + + n_in_sandy = sum(1 for f in findings if f.inside_sandy_2012) + n_dep_2080 = sum(1 for f in findings + if (f.dep_extreme_2080_class or 0) > 0) + return { + "available": True, + "n_hospitals": len(findings), + "radius_m": radius_m, + "footprint_buffer_m": BUFFER_DOH_HOSPITAL_M, + "n_inside_sandy_2012": n_in_sandy, + "n_in_dep_extreme_2080": n_dep_2080, + "hospitals": [vars(f) for f in findings], + "citation": ("NYS DOH Health Facility Certification (vn5v-hh5r) + " + "NYC OEM Sandy 2012 Inundation Zone (5xsi-dfpx) + " + "NYC DEP Stormwater Flood Maps + USGS 3DEP DEM"), + } + + +def main() -> int: + import argparse + ap = argparse.ArgumentParser() + ap.add_argument("--lat", type=float, required=True) + ap.add_argument("--lon", type=float, required=True) + ap.add_argument("--radius", type=float, default=DEFAULT_RADIUS_M) + ap.add_argument("--max", type=int, default=DEFAULT_MAX_PER_QUERY) + args = ap.parse_args() + s = summary_for_point(args.lat, args.lon, args.radius, args.max) + print(json.dumps(s, indent=2, default=str)) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/app/registers/mta_entrances.py b/app/registers/mta_entrances.py new file mode 100644 index 0000000000000000000000000000000000000000..056dcd6abc773c08e27c4d127d9703fa5633398e --- /dev/null +++ b/app/registers/mta_entrances.py @@ -0,0 +1,243 @@ +"""mta_entrance_exposure — flood-exposure briefing per subway entrance. + +The headline new specialist for the IBM senior technical staffer's +"subway entrances" reaction. Joins: + + - MTA Open Data subway-entrance geometry (data/mta_entrances.geojson, + 2120 entrances city-wide). + - NYC OEM Sandy 2012 Inundation Zone (data/sandy_inundation.geojson) + — empirical evidence (a flood actually happened here). + - NYC DEP Stormwater Flood Maps for Extreme-2080, Moderate-2050, + Moderate-current scenarios — modeled evidence. + - USGS 3DEP DEM (data/nyc_dem_30m.tif) for entrance-level elevation. + - HAND raster (data/hand.tif) for height above nearest drainage. + - Entrance type → ADA-status heuristic (Elevator / Ramp = accessible). + +Per queried address, returns the entrances within a configurable +radius (default 800 m) with structured per-entrance claims the +reconciler can cite. 
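+A typical per-entrance claim (values illustrative): "entrance 120 m from
+the query, inside the 2012 Sandy zone, DEP Moderate-2050 class 2, not
+ADA-accessible".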
doc_id format: `mta_entrance_`. + +Honest scope (per Riprap discipline): + - This is an EXPOSURE specialist, not a damage forecast. We say + "this entrance sits inside the 2012 Sandy zone" — we don't say + "this entrance will flood again in the next storm". + - The Sandy / DEP layers are point-in-polygon over public-record + geometry; ADA status from the MTA Open Data `entrance_type` + column is a heuristic, not the authoritative MTA accessibility + list. + - Documented MTA Sandy-recovery records for specific stations are + NOT included in this first cut — only the empirical-inundation + membership. Adding station-level recovery citations requires + parsing the MTA's "Hurricane Sandy: Three Years Later" report + and is a follow-up. +""" + +from __future__ import annotations + +import json +import logging +import math +import sys +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +from app.registers._footprint import ( + BUFFER_MTA_ENTRANCE_M, + dep_class_buffered, + inside_sandy_buffered, +) + +# Ensure `app/` is importable when this experiment is invoked directly +# from its own subdir. +_ROOT = Path(__file__).resolve().parents[2] +if str(_ROOT) not in sys.path: + sys.path.insert(0, str(_ROOT)) + +log = logging.getLogger("riprap.mta_entrance") + +DATA = Path(__file__).resolve().parents[2] / "data" +MTA_ENTRANCES = DATA / "mta_entrances.geojson" + +ADA_ACCESSIBLE_TYPES = {"Elevator", "Ramp"} + +DEFAULT_RADIUS_M = 800 +DEFAULT_MAX_PER_QUERY = 8 # cap per station so doc payload stays small + + +@dataclass +class EntranceFinding: + station_id: str + station_name: str + daytime_routes: str + borough: str + entrance_type: str + entrance_lat: float + entrance_lon: float + distance_m: float + ada_accessible: bool + elevation_m: float | None + hand_m: float | None # height above nearest drainage + inside_sandy_2012: bool + dep_extreme_2080_class: int | None # 0/1/2/3 + dep_extreme_2080_label: str | None + dep_moderate_2050_class: int | None + dep_moderate_2050_label: str | None + + +def _haversine_m(lat1, lon1, lat2, lon2) -> float: + R = 6371000.0 + p1, p2 = math.radians(lat1), math.radians(lat2) + dp = math.radians(lat2 - lat1); dl = math.radians(lon2 - lon1) + a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2 + return 2 * R * math.asin(math.sqrt(a)) + + +@lru_cache(maxsize=1) +def _load_entrances(): + import geopandas as gpd + import pandas as pd + gdf = gpd.read_file(MTA_ENTRANCES) + # The lat/lon columns are strings in this GeoJSON; coerce so we + # can do range comparisons in the bbox prefilter. + gdf["entrance_latitude"] = pd.to_numeric(gdf["entrance_latitude"], + errors="coerce") + gdf["entrance_longitude"] = pd.to_numeric(gdf["entrance_longitude"], + errors="coerce") + gdf = gdf[gdf["entrance_latitude"].notna() + & gdf["entrance_longitude"].notna()].copy() + return gdf.reset_index(drop=True) + + +def _entrances_near(lat: float, lon: float, radius_m: float): + gdf = _load_entrances() + # Coarse bbox prefilter to avoid haversine on 2120 rows every call. 
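+    # At ~40.7° N, 1° of latitude ≈ 111 km and 1° of longitude ≈ 84 km,
+    # so radius_m / 90_000 over-covers north-south (~1.23× the radius)
+    # and runs ~6% tight east-west; a hit at the extreme radius edge due
+    # east/west can be dropped by the box before the exact haversine cut.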
+    deg = radius_m / 90_000  # metres → degrees padding (see note above)
+    sub = gdf[
+        (gdf["entrance_latitude"].between(lat - deg, lat + deg))
+        & (gdf["entrance_longitude"].between(lon - deg, lon + deg))
+    ].copy()
+    if sub.empty:
+        return sub
+    sub["distance_m"] = sub.apply(
+        lambda r: _haversine_m(lat, lon, r["entrance_latitude"],
+                               r["entrance_longitude"]),
+        axis=1,
+    )
+    sub = sub[sub["distance_m"] <= radius_m].sort_values("distance_m")
+    return sub
+
+
+def _sample_raster(raster_path: Path, lat: float, lon: float) -> float | None:
+    """Read one pixel from a raster at (lat, lon). Returns None if the
+    point is outside the raster or the raster is missing.
+
+    The cached NYC rasters are all EPSG:4326. rasterio's sample() handles
+    coordinate-to-pixel translation directly — simpler than building
+    a windowed read."""
+    if not raster_path.exists():
+        return None
+    try:
+        import rasterio
+        with rasterio.open(raster_path) as src:
+            v = next(src.sample([(lon, lat)]))[0]
+            if v is None:
+                return None
+            v = float(v)
+            if math.isnan(v) or v == src.nodata:
+                return None
+            return v
+    except Exception:
+        log.exception("raster sample failed for %s", raster_path)
+        return None
+
+
+def _inside_sandy(lat: float, lon: float) -> bool:
+    """Sandy join with a small (8 m) buffer to capture entrances at the
+    polygon edge — the entrance point is the centerline of the stairwell;
+    the actual opening is wider."""
+    return inside_sandy_buffered(lat, lon, BUFFER_MTA_ENTRANCE_M)
+
+
+def _dep_class(lat: float, lon: float, scenario: str) -> tuple[int | None, str | None]:
+    return dep_class_buffered(lat, lon, BUFFER_MTA_ENTRANCE_M, scenario)
+
+
+def summary_for_point(lat: float, lon: float,
+                      radius_m: float = DEFAULT_RADIUS_M,
+                      max_entrances: int = DEFAULT_MAX_PER_QUERY) -> dict:
+    """Return all subway entrances within `radius_m` of (lat, lon),
+    enriched with flood-exposure fields. Empty list when no entrances
+    are nearby (silence over confabulation)."""
+    near = _entrances_near(lat, lon, radius_m)
+    if near.empty:
+        return {"available": False,
+                "n_entrances": 0,
+                "radius_m": radius_m,
+                "entrances": []}
+
+    near = near.head(max_entrances)
+    findings: list[EntranceFinding] = []
+    for _, row in near.iterrows():
+        elat, elon = float(row["entrance_latitude"]), float(row["entrance_longitude"])
+        ada = str(row["entrance_type"]) in ADA_ACCESSIBLE_TYPES
+        elev = _sample_raster(DATA / "nyc_dem_30m.tif", elat, elon)
+        hand = _sample_raster(DATA / "hand.tif", elat, elon)
+        in_sandy = _inside_sandy(elat, elon)
+        dep_2080_class, dep_2080_label = _dep_class(elat, elon, "dep_extreme_2080")
+        dep_2050_class, dep_2050_label = _dep_class(elat, elon, "dep_moderate_2050")
+        findings.append(EntranceFinding(
+            station_id=str(row["station_id"]),
+            station_name=str(row["stop_name"]),
+            daytime_routes=str(row["daytime_routes"]),
+            borough=str(row["borough"]),
+            entrance_type=str(row["entrance_type"]),
+            entrance_lat=elat, entrance_lon=elon,
+            distance_m=round(float(row["distance_m"]), 1),
+            ada_accessible=ada,
+            elevation_m=round(elev, 2) if elev is not None else None,
+            hand_m=round(hand, 2) if hand is not None else None,
+            inside_sandy_2012=in_sandy,
+            dep_extreme_2080_class=dep_2080_class,
+            dep_extreme_2080_label=dep_2080_label,
+            dep_moderate_2050_class=dep_2050_class,
+            dep_moderate_2050_label=dep_2050_label,
+        ))
+
+    # Per-query rollups across the returned entrances.
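+    # e.g. 8 entrances returned with 3 inside the Sandy zone lets the
+    # reconciler cite "3 of 8 nearby entrances sit inside the 2012 Sandy
+    # Inundation Zone" (illustrative counts).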
+ n_in_sandy = sum(1 for f in findings if f.inside_sandy_2012) + n_in_dep_2080 = sum(1 for f in findings + if (f.dep_extreme_2080_class or 0) > 0) + n_ada = sum(1 for f in findings if f.ada_accessible) + return { + "available": True, + "n_entrances": len(findings), + "radius_m": radius_m, + "footprint_buffer_m": BUFFER_MTA_ENTRANCE_M, + "n_inside_sandy_2012": n_in_sandy, + "n_in_dep_extreme_2080": n_in_dep_2080, + "n_ada_accessible": n_ada, + "entrances": [vars(f) for f in findings], + "citation": ("MTA Open Data subway entrances + NYC OEM Sandy 2012 " + "Inundation Zone (5xsi-dfpx) + NYC DEP Stormwater " + "Flood Maps + USGS 3DEP DEM"), + } + + +def main() -> int: + """CLI smoke test.""" + import argparse + ap = argparse.ArgumentParser() + ap.add_argument("--lat", type=float, required=True) + ap.add_argument("--lon", type=float, required=True) + ap.add_argument("--radius", type=float, default=DEFAULT_RADIUS_M) + ap.add_argument("--max", type=int, default=DEFAULT_MAX_PER_QUERY) + args = ap.parse_args() + s = summary_for_point(args.lat, args.lon, args.radius, args.max) + print(json.dumps(s, indent=2, default=str)) + return 0 + + +if __name__ == "__main__": + import sys + sys.exit(main()) diff --git a/app/registers/nycha.py b/app/registers/nycha.py new file mode 100644 index 0000000000000000000000000000000000000000..374eb16a7da1b78118b8e0b5e76a42a036b515e5 --- /dev/null +++ b/app/registers/nycha.py @@ -0,0 +1,293 @@ +"""nycha_development_exposure — flood-exposure briefing per NYCHA development. + +Same pattern as the MTA-entrance specialist, but NYCHA developments are +*polygons* not points, so the metrics shift to overlap fractions: + + - % of footprint inside the 2012 Sandy Inundation Zone (empirical) + - % of footprint inside DEP Extreme-2080 / Moderate-2050 scenarios + (modeled, broken out by depth class) + - Representative-point elevation, HAND, TWI (proxy) + - Footprint area (km²) + - Distance from query point to development boundary + +Joins: + - data/nycha.geojson (NYC Open Data, 218 NYCHA developments) + - data/sandy_inundation.geojson + - DEP Stormwater Flood Map polygons (3 scenarios) + - data/nyc_dem_30m.tif, data/hand.tif + +Per queried (lat, lon), returns developments whose centroid is within +the radius (default 2000 m — NYCHA developments are sparser than +subway entrances, so the radius is wider). + +Honest scope: + - This is exposure, not damage forecast. We say "85% of this + development's footprint is inside the 2012 Sandy zone" — not + "this development will flood next storm". + - All overlap fractions are computed in EPSG:2263 (NYC State Plane, + feet) for accurate area arithmetic in the city. 
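+  - Overlap math is a plain area ratio:
+    pct = 100 * area(footprint ∩ zone) / area(footprint),
+    computed on buffer(0)-repaired geometry (see _overlap_pct below).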
+""" + +from __future__ import annotations + +import json +import logging +import math +import sys +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +_ROOT = Path(__file__).resolve().parents[2] +if str(_ROOT) not in sys.path: + sys.path.insert(0, str(_ROOT)) + +log = logging.getLogger("riprap.nycha") + +DATA = _ROOT / "data" +NYCHA = DATA / "nycha.geojson" + +DEFAULT_RADIUS_M = 2000 +DEFAULT_MAX_PER_QUERY = 5 + + +@dataclass +class DevelopmentFinding: + development: str + tds_num: str + borough: str + centroid_lat: float + centroid_lon: float + distance_m: float + rep_elevation_m: float | None + rep_hand_m: float | None + inside_sandy_2012: bool + dep_extreme_2080_class: int # 0=outside, 1/2/3 = depth class + dep_extreme_2080_label: str + dep_moderate_2050_class: int + dep_moderate_2050_label: str + dep_moderate_current_class: int + dep_moderate_current_label: str + + +@lru_cache(maxsize=1) +def _load_nycha(): + import geopandas as gpd + gdf = gpd.read_file(NYCHA).to_crs("EPSG:2263") # feet, accurate areas + gdf["centroid_2263"] = gdf.geometry.centroid + return gdf.reset_index(drop=True) + + +@lru_cache(maxsize=1) +def _load_sandy_2263(): + """Load the Sandy zone in EPSG:2263 once. Already used by + app.flood_layers.sandy_inundation but we want the geometry directly + for overlap-fraction math.""" + import geopandas as gpd + g = gpd.read_file(DATA / "sandy_inundation.geojson").to_crs("EPSG:2263") + # Some NYC OEM Sandy polygons have hole-orientation issues that + # blow up unary_union. buffer(0) fixes self-intersections without + # changing the footprint at sub-foot precision. + g["geometry"] = g.geometry.buffer(0) + return g.geometry.union_all() + + +@lru_cache(maxsize=4) +def _load_dep_2263(scenario: str): + """DEP scenario polygons in EPSG:2263, with depth_class column.""" + import geopandas as gpd + p = DATA / "dep" / f"{scenario}.geojson" + if not p.exists(): + # Fallback to whatever the existing dep_stormwater module loaded. + from app.flood_layers import dep_stormwater + gdf = dep_stormwater.load(scenario) + return gdf.to_crs("EPSG:2263") if gdf.crs is not None else gdf + return gpd.read_file(p).to_crs("EPSG:2263") + + +def _haversine_m(lat1, lon1, lat2, lon2) -> float: + R = 6371000.0 + p1, p2 = math.radians(lat1), math.radians(lat2) + dp = math.radians(lat2 - lat1); dl = math.radians(lon2 - lon1) + a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2 + return 2 * R * math.asin(math.sqrt(a)) + + +def _sample_raster(raster_path: Path, lat: float, lon: float) -> float | None: + if not raster_path.exists(): + return None + try: + import rasterio + with rasterio.open(raster_path) as src: + v = next(src.sample([(lon, lat)]))[0] + v = float(v) + if math.isnan(v) or v == src.nodata: + return None + return v + except Exception: + log.exception("raster sample failed for %s", raster_path) + return None + + +def _developments_near(lat: float, lon: float, radius_m: float): + """Return developments whose centroid is within `radius_m` of + (lat, lon). 
Uses haversine on centroids re-projected back to + EPSG:4326 — the bbox prefilter gets us close, then exact distance.""" + import geopandas as gpd + gdf = _load_nycha() + # Re-project centroids to 4326 for haversine + cents_4326 = gpd.GeoSeries(gdf["centroid_2263"], crs="EPSG:2263").to_crs("EPSG:4326") + deg = radius_m / 90_000 + cent_lat = cents_4326.y + cent_lon = cents_4326.x + mask = ((cent_lat >= lat - deg) & (cent_lat <= lat + deg) + & (cent_lon >= lon - deg) & (cent_lon <= lon + deg)) + sub = gdf[mask].copy() + if sub.empty: + return sub, [] + sub["clat"] = cent_lat[mask].values + sub["clon"] = cent_lon[mask].values + sub["distance_m"] = sub.apply( + lambda r: _haversine_m(lat, lon, r["clat"], r["clon"]), + axis=1, + ) + sub = sub[sub["distance_m"] <= radius_m].sort_values("distance_m") + return sub, sub.index.tolist() + + +def _overlap_pct(geom_2263, mask_geom_2263) -> float: + """% of geom_2263's area that intersects mask_geom_2263.""" + if mask_geom_2263 is None or mask_geom_2263.is_empty: + return 0.0 + inter = geom_2263.intersection(mask_geom_2263) + if inter.is_empty: + return 0.0 + return round(100.0 * inter.area / max(geom_2263.area, 1e-9), 2) + + +def _dep_overlap(geom_2263, scenario: str) -> tuple[float, float]: + """Return (pct_any_depth, pct_deep_contiguous) of a polygon's area + inside the DEP scenario.""" + try: + gdf = _load_dep_2263(scenario) + except Exception: + log.exception("DEP load failed for %s", scenario) + return 0.0, 0.0 + if gdf is None or gdf.empty: + return 0.0, 0.0 + # Bbox-prefilter the DEP polygons to those near our development. + minx, miny, maxx, maxy = geom_2263.bounds + cand = gdf.cx[minx:maxx, miny:maxy] + if cand.empty: + return 0.0, 0.0 + # DEP NYC stormwater FGDB uses `Flooding_Category` (int16): + # 1=nuisance, 2=shallow, 3=deep contiguous (>4 ft). + cat_col = "Flooding_Category" if "Flooding_Category" in cand.columns else None + any_geom = cand.geometry.buffer(0).union_all() + if cat_col: + deep = cand[cand[cat_col] == 3] + deep_geom = deep.geometry.buffer(0).union_all() if not deep.empty else None + else: + deep_geom = None + pct_any = _overlap_pct(geom_2263, any_geom) + pct_deep = _overlap_pct(geom_2263, deep_geom) if deep_geom is not None else 0.0 + return pct_any, pct_deep + + +_DEPTH_LABEL = { + 0: "outside", + 1: "Nuisance (>4 in to 1 ft)", + 2: "Deep & Contiguous (1-4 ft)", + 3: "Deep Contiguous (>4 ft)", +} + + +def summary_for_point(lat: float, lon: float, + radius_m: float = DEFAULT_RADIUS_M, + max_developments: int = DEFAULT_MAX_PER_QUERY) -> dict: + """Return the N nearest tier-1-3 NYCHA developments to (lat, lon) + within radius_m, with their pre-computed exposure flags from the + register catalog at data/registers/nycha.json. + + The catalog is the source of truth for which developments are + flood-exposed (the bake script ran the polygon-overlap math once, + citywide). Per-query work is haversine + dict lookup — sub-ms even + on the HF Space CPU. Developments outside the tier-1-3 catalog + (truly unexposed inland sites) are intentionally not surfaced; + "no NYCHA developments at risk within 1 mi" is the honest answer + for low-exposure queries. 
+ """ + from app.registers._loader import nearest_n + hits = nearest_n("nycha", lat, lon, radius_m, max_developments) + if not hits: + return {"available": False, + "n_developments": 0, + "radius_m": radius_m, + "developments": []} + + findings: list[DevelopmentFinding] = [] + for distance_m, row in hits: + snap = row.get("snap") or {} + dep = snap.get("dep") or {} + microtopo = snap.get("microtopo") or {} + + def _dep_class(scen: str) -> int: + d = dep.get(scen) or {} + return int(d.get("depth_class") or 0) + + c2080 = _dep_class("dep_extreme_2080") + c2050 = _dep_class("dep_moderate_2050") + ccur = _dep_class("dep_moderate_current") + + elev = microtopo.get("point_elev_m") + hand = microtopo.get("aoi_hand_m") or microtopo.get("hand_m") + + findings.append(DevelopmentFinding( + development=str(row.get("name", "")), + tds_num=str(row.get("tds_num", "")), + borough=str(row.get("borough", "")), + centroid_lat=round(float(row["lat"]), 5), + centroid_lon=round(float(row["lon"]), 5), + distance_m=round(distance_m, 1), + rep_elevation_m=round(float(elev), 2) if elev is not None else None, + rep_hand_m=round(float(hand), 2) if hand is not None else None, + inside_sandy_2012=bool(snap.get("sandy")), + dep_extreme_2080_class=c2080, + dep_extreme_2080_label=_DEPTH_LABEL.get(c2080, "outside"), + dep_moderate_2050_class=c2050, + dep_moderate_2050_label=_DEPTH_LABEL.get(c2050, "outside"), + dep_moderate_current_class=ccur, + dep_moderate_current_label=_DEPTH_LABEL.get(ccur, "outside"), + )) + + n_in_sandy = sum(1 for f in findings if f.inside_sandy_2012) + n_in_2080 = sum(1 for f in findings if f.dep_extreme_2080_class > 0) + return { + "available": True, + "n_developments": len(findings), + "radius_m": radius_m, + "n_inside_sandy_2012": n_in_sandy, + "n_in_dep_extreme_2080": n_in_2080, + "developments": [vars(f) for f in findings], + "citation": ("Pre-computed from NYC Open Data NYCHA Developments " + "(phvi-damg) joined to Sandy 2012 Inundation Zone " + "(5xsi-dfpx) + NYC DEP Stormwater Flood Maps + " + "USGS 3DEP DEM. See data/registers/nycha.json."), + } + + +def main() -> int: + import argparse + ap = argparse.ArgumentParser() + ap.add_argument("--lat", type=float, required=True) + ap.add_argument("--lon", type=float, required=True) + ap.add_argument("--radius", type=float, default=DEFAULT_RADIUS_M) + ap.add_argument("--max", type=int, default=DEFAULT_MAX_PER_QUERY) + args = ap.parse_args() + s = summary_for_point(args.lat, args.lon, args.radius, args.max) + print(json.dumps(s, indent=2, default=str)) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/app/score.py b/app/score.py new file mode 100644 index 0000000000000000000000000000000000000000..7e02fc86a731c7f08e80f641d4b63c8a1609c9a8 --- /dev/null +++ b/app/score.py @@ -0,0 +1,345 @@ +"""Riprap exposure scoring — research-grounded deterministic rubric. + +This is an EXPOSURE index, not a damage probability. It produces a tier +1-4 from a thematic additive composite over min-max-normalized indicators +within sub-indices. The same input always produces the same tier; live +signals (NWS alerts, surge residual, hourly precip) are NOT in this +score — they are surfaced as a separate "current conditions" badge per +NPCC4 / IPCC AR6 WG II's distinction between exposure (quasi-stationary +property of place) and event occurrence (time-varying). + +Methodology: +- Cutter, Boruff & Shirley, 2003. "Social Vulnerability to Environmental + Hazards." Social Science Quarterly 84(2): 242-261. — hazards-of-place + composite construction. 
+- Tate, 2012. "Social Vulnerability Indices: A Comparative Assessment + Using Uncertainty and Sensitivity Analysis." Natural Hazards 63: 325- + 347. — equal weights within thematic groups are the most rank-stable + default; differential weighting is hard to defend. +- Balica, Wright & van der Meulen, 2012. "A Flood Vulnerability Index + for Coastal Cities." Natural Hazards 64: 73-105. — multiplicative + override behaviour; we recover the important part as a "max-empirical + floor" rather than a full multiplicative form. + +Per-indicator citations: +- HAND breakpoints: Nobre et al., 2011. "Height Above the Nearest + Drainage." J. Hydrology 404: 13-29. +- TWI: Beven & Kirkby, 1979. Hydrological Sciences Bulletin 24; Sørensen, + Zinko & Seibert, 2006. HESS 10: 101-112. (Half-weight because TWI is + noisier than HAND in flat urban DEMs; we percentile-bin rather than + use absolute cutoffs.) +- Zone hierarchy: NYC NPCC4 (2024) Ch. 3; NYC Hazard Mitigation Plan 2024. +- USGS HWM proximity floor: USGS HWM positional uncertainty is typically + 5-30 m horizontal, so 100 m gives ~3σ headroom for a true "this + address was inundated" signal. + +Scope limit: We have no labeled flood-damage outcomes. The tier is a +literature-grounded exposure prior, not a calibrated loss prediction. +For insurance pricing, use FEMA Risk Rating 2.0 (claims-driven GLM). +""" +from __future__ import annotations + +import pandas as pd + +# ---------- Indicator schemas ---------------------------------------------- +# +# Each sub-index is a mapping {indicator_name: weight}. Within a sub-index, +# the weighted sum is normalized by the maximum possible weight, giving a +# 0-1 score per sub-index. The composite is the sum of the three sub-index +# scores (range 0-3), then mapped to tiers. +# +# Why equal weights within thematic groups: Tate 2012's uncertainty +# analysis showed that differential weighting is the most-attacked axis +# of any composite vulnerability/exposure index. Equal weights are the +# safest default; agency tiering (which puts FEMA 1% above 0.2%, Sandy +# above modeled scenarios) supplies the remaining structure. + +REGULATORY = { + # FEMA NFHL — regulatory baseline. SFHA (1%) is the mandate threshold. + "fema_1pct": 1.00, + "fema_02pct": 0.50, + # NYC DEP Stormwater Maps (2021) — modeled pluvial scenarios. + # Moderate-2050 is treated heavier than Extreme-2080 because NPCC4 + # explicitly designates 2080 SLR + 7 in/hr as a TAIL scenario. + "dep_moderate_2050": 0.75, + "dep_extreme_2080": 0.50, + "dep_tidal_2050": 0.75, +} + +HYDROLOGICAL = { + # HAND (Height Above Nearest Drainage), banded per Nobre et al. 2011. + # Bands: <1 m (channel/floodplain near-certain wet) → 1.0 + # 1-3 m (floodplain) → 0.66 + # 3-10 m (transitional) → 0.33 + # >10 m (hillslope, dry) → 0 + "hand_band": 1.00, + # TWI quartile (top quartile = saturation-prone). Half-weight + # because TWI is noisier than HAND in urban DEMs; we percentile-bin + # within NYC rather than using absolute cutoffs. + "twi_quartile": 0.50, + # Local-relief inversions: low percentile = topographic low point. + # Bins: <10th=1.0, 10-25th=0.66, 25-50th=0.33, ≥50th=0. + "elev_pct_200m_inv": 0.50, + "elev_pct_750m_inv": 0.50, + # Basin relief contributes a small additional terrain term. + "basin_relief_band": 0.25, +} + +EMPIRICAL = { + # Sandy 2012 inundation — empirical post-event extent. Also triggers + # the max-empirical FLOOR rule below. + "sandy": 1.00, + # USGS Hurricane Ida 2021 high-water marks. 
Within 100 m → "direct" + # (also triggers the floor); 100-800 m → "neighborhood proximity". + "ida_hwm_within_100m": 1.00, + "ida_hwm_within_800m": 0.50, + # Prithvi-EO 2.0 satellite-derived inundation polygon (Hurricane Ida + # pre/post diff) — semi-empirical because model-derived but + # conditioned on observed Sentinel-2 imagery. + "prithvi_polygon": 0.75, + # NYC 311 flood-related complaint count, banded over 5-year window: + # ≥10 → 1.0, 3-9 → 0.66, 1-2 → 0.33, 0 → 0 + # Weight capped at 0.75 because 311 has documented socio-economic + # reporting bias (engagement varies by neighborhood). + "complaints_band": 0.75, + # FloodNet trigger flag (any labeled flood event at any sensor + # within 600 m, last 3 years). Same 0.75 cap as 311 since both have + # spatial coverage bias. + "floodnet_trigger": 0.75, +} + + +def _hand_band(hand_m: float | None) -> float: + """Nobre et al. 2011 HAND classes adapted for NYC's flat urban terrain.""" + if hand_m is None: + return 0.0 + if hand_m < 1.0: + return 1.0 + if hand_m < 3.0: + return 0.66 + if hand_m < 10.0: + return 0.33 + return 0.0 + + +def _percentile_inv_band(pct: float | None) -> float: + """Inverted relief percentile: lower = more exposed (water pools here).""" + if pct is None: + return 0.0 + if pct < 10: + return 1.0 + if pct < 25: + return 0.66 + if pct < 50: + return 0.33 + return 0.0 + + +def _twi_quartile(twi: float | None) -> float: + """TWI thresholds calibrated to NYC's flat 30 m DEM. Top quartile + cutoff comes from the NYC-wide TWI distribution; here we approximate + with literature-typical breakpoints (Sørensen 2006 site-specific + advice).""" + if twi is None: + return 0.0 + if twi >= 12: + return 1.0 + if twi >= 10: + return 0.66 + if twi >= 8: + return 0.33 + return 0.0 + + +def _basin_relief_band(relief_m: float | None) -> float: + if relief_m is None: + return 0.0 + # Higher basin relief in a flat area means the address sits in a real + # depression. Banding is empirical for NYC. + if relief_m >= 8: + return 1.0 + if relief_m >= 4: + return 0.66 + if relief_m >= 2: + return 0.33 + return 0.0 + + +def _complaints_band(n: int | None) -> float: + if not n: + return 0.0 + if n >= 10: + return 1.0 + if n >= 3: + return 0.66 + if n >= 1: + return 0.33 + return 0.0 + + +# ---------- Sub-index computation ------------------------------------------ + +def _normalize(weighted: float, weights: dict[str, float]) -> float: + max_w = sum(weights.values()) + return weighted / max_w if max_w else 0.0 + + +def regulatory_subindex(s: dict) -> float: + """0..1. All inputs are binary (inside zone or not).""" + w = REGULATORY + raw = sum(w[k] * (1.0 if s.get(k) else 0.0) for k in w) + return _normalize(raw, w) + + +def hydrological_subindex(s: dict) -> float: + """0..1. Inputs are continuous; convert to ordinal bands first.""" + w = HYDROLOGICAL + bands = { + "hand_band": _hand_band(s.get("hand_m")), + "twi_quartile": _twi_quartile(s.get("twi")), + "elev_pct_200m_inv": _percentile_inv_band(s.get("rel_elev_pct_200m")), + "elev_pct_750m_inv": _percentile_inv_band(s.get("rel_elev_pct_750m")), + "basin_relief_band": _basin_relief_band(s.get("basin_relief_m")), + } + raw = sum(w[k] * bands[k] for k in w) + return _normalize(raw, w) + + +def empirical_subindex(s: dict) -> float: + """0..1. 
Mix of binary and banded count signals.""" + w = EMPIRICAL + vals = { + "sandy": 1.0 if s.get("sandy") else 0.0, + "ida_hwm_within_100m": 1.0 if s.get("ida_hwm_within_100m") else 0.0, + "ida_hwm_within_800m": 1.0 if s.get("ida_hwm_within_800m") else 0.0, + "prithvi_polygon": 1.0 if s.get("prithvi_polygon") else 0.0, + "complaints_band": _complaints_band(s.get("complaints_count")), + "floodnet_trigger": 1.0 if s.get("floodnet_trigger") else 0.0, + } + raw = sum(w[k] * vals[k] for k in w) + return _normalize(raw, w) + + +# ---------- Composite + tier mapping --------------------------------------- + +# Tier breakpoints over the composite (range 0-3, since each sub-index is +# 0-1). Tuned so that "Sandy + DEP-2050 + HAND<1m" lands in Tier 2 (Tier 3 +# on the raw composite, promoted to Tier 2 by the empirical floor) and a +# single positive signal lands in Tier 4. Documented in METHODOLOGY.md. +TIER_BREAKPOINTS = [ + (1.50, 1), # high — multiple sub-indices saturated + (1.00, 2), # elevated — at least one strong sub-index + (0.50, 3), # moderate — partial signals across categories + (0.01, 4), # limited — a single contextual signal +] + +TIER_LABELS = { + 1: ("High exposure", "Multiple sub-indices saturated; empirical and/or " + "modeled scenarios both indicate substantial exposure."), + 2: ("Elevated exposure", "At least one sub-index near saturation; significant " + "overlap with empirical or modeled scenarios."), + 3: ("Moderate exposure", "Partial signals across categories; scenario- or " + "neighborhood-specific exposure."), + 4: ("Limited exposure", "A single contextual signal; no positive scenario hits."), + 0: ("No flagged exposure", "No positive flood signal across the assessed sources."), +} + + +def composite(signals: dict) -> dict: + """Compute sub-indices, composite score, and tier with the floor rule. + + Returns: { + 'subindices': {'regulatory': 0..1, 'hydrological': 0..1, 'empirical': 0..1}, + 'composite': 0..3, + 'tier': 0..4, + 'floor_applied': bool, + } + + Max-empirical floor: if Sandy 2012 inundation OR a USGS Ida HWM within + 100 m fired, the tier is capped at 2: the address can still score + Tier 1, but is never reported weaker than Elevated. This recovers + the multiplicative behavior — empirical evidence overrides terrain or + modeled scenarios — without giving up additive transparency. + """ + reg = regulatory_subindex(signals) + hyd = hydrological_subindex(signals) + emp = empirical_subindex(signals) + composite_score = reg + hyd + emp + + raw_tier = 0 + for bp, t in TIER_BREAKPOINTS: # `bp`, not `breakpoint`: avoid shadowing the builtin + if composite_score >= bp: + raw_tier = t + break + + floor_applied = bool(signals.get("sandy") or signals.get("ida_hwm_within_100m")) + if floor_applied and (raw_tier == 0 or raw_tier > 2): + final_tier = 2 + else: + final_tier = raw_tier + + return { + "subindices": { + "regulatory": round(reg, 3), + "hydrological": round(hyd, 3), + "empirical": round(emp, 3), + }, + "composite": round(composite_score, 3), + "tier": final_tier, + "floor_applied": floor_applied, + } + + +# ---------- Backward-compat shims ------------------------------------------ +# Register CLI and register_builder consume a flat `tier` column on a +# DataFrame. The shim materializes composite() over rows and writes back +# `score` (composite scaled 0-100) and `tier`. + +def tier(score: int) -> int: + """Legacy bridge for callers that still pass a small-integer score. + Maps the OLD additive-integer score to the new tier breakpoints by + scaling.
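A minimal + doctest sketch of the bridge; both values follow directly from the + breakpoints below: + + >>> tier(5) + 2 + >>> tier(1) + 4 + +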
Prefer composite() for new code.""" + if score >= 6: return 1 + if score >= 4: return 2 + if score >= 2: return 3 + if score >= 1: return 4 + return 0 + + +# Legacy WEIGHTS map kept so riprap.py and any external consumer +# continue to import without breaking. The new composite() is the +# authoritative scorer. +WEIGHTS = { + "sandy": 3, + "dep_extreme_2080": 2, + "dep_moderate_2050": 2, + "dep_moderate_current": 1, + "complaints_3plus": 1, + "floodnet_trigger": 1, + "policy_named": 1, +} + + +def score_row(signals: dict) -> tuple[int, int]: + """Legacy-shape wrapper around composite(). Returns (composite_x100, tier).""" + c = composite(signals) + return int(round(c["composite"] * 100)), c["tier"] + + +def score_frame(df: pd.DataFrame) -> pd.DataFrame: + """Vectorized composite over a DataFrame whose columns name our + indicators. Missing columns are treated as 0 / None. + + Adds columns: subindex_regulatory, subindex_hydrological, + subindex_empirical, composite, score, tier, floor_applied. + `score` is the composite scaled 0-100 for register CSV legibility. + """ + out = df.copy() + rows = out.to_dict(orient="records") + results = [composite(r) for r in rows] + out["subindex_regulatory"] = [r["subindices"]["regulatory"] for r in results] + out["subindex_hydrological"] = [r["subindices"]["hydrological"] for r in results] + out["subindex_empirical"] = [r["subindices"]["empirical"] for r in results] + out["composite"] = [r["composite"] for r in results] + out["score"] = (out["composite"] * 100).round().astype(int) + out["tier"] = [r["tier"] for r in results] + out["floor_applied"] = [r["floor_applied"] for r in results] + return out diff --git a/app/spatial.py b/app/spatial.py new file mode 100644 index 0000000000000000000000000000000000000000..c379b909037993d3d2c61960ef616c15cf2c9b5e --- /dev/null +++ b/app/spatial.py @@ -0,0 +1,22 @@ +"""Spatial helpers. NYC works in EPSG:2263 (NY state plane, feet).""" +from __future__ import annotations + +from pathlib import Path + +import geopandas as gpd + +NYC_CRS = "EPSG:2263" # ft +WGS84 = "EPSG:4326" + +DATA = Path(__file__).resolve().parent.parent / "data" + + +def to_nyc(g: gpd.GeoDataFrame) -> gpd.GeoDataFrame: + if g.crs is None: + raise ValueError("layer has no CRS") + return g.to_crs(NYC_CRS) if g.crs.to_string() != NYC_CRS else g + + +def load_layer(path: str | Path, layer: str | None = None) -> gpd.GeoDataFrame: + g = gpd.read_file(path, layer=layer) if layer else gpd.read_file(path) + return to_nyc(g) diff --git a/app/stones/__init__.py b/app/stones/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b1dd70f1d77f6ebe0e2bde1f8303a3854ed29cf7 --- /dev/null +++ b/app/stones/__init__.py @@ -0,0 +1,41 @@ +"""Five Stones — conceptual grouping over the FSM specialists. + +Riprap's FSM runs ~20 atomic specialist actions; the Stones layer is a +thin re-grouping that gives the trace UI, the briefing prompt, and the +project's public framing five legible roles instead of 20 atomic +function calls. + +Each Stone module exposes the same shape: + + NAME — display name (e.g. "Cornerstone") + TAGLINE — single phrase used as a section header + DESCRIPTION — one-sentence description for the README / trace UI + SOURCES — list of FSM state keys this Stone aggregates from + collect(state) — pull this Stone's documents out of the state dict + +Order is meaningful: + 1. Cornerstone — the hazard reader (static record) + 2. Keystone — the asset register (exposure) + 3. Touchstone — the live observer (current sensors + EO) + 4. 
Lodestone — the projector (forecast) + 5. Capstone — the synthesiser (Granite 4.1 + Mellea) + +The first four are *data-Stones*; the Capstone IS the reconciler. +""" +from __future__ import annotations + +from app.stones import capstone, cornerstone, keystone, lodestone, touchstone + +# Iteration order for the briefing prompt and trace UI. +DATA_STONES = [cornerstone, keystone, touchstone, lodestone] +ALL_STONES = DATA_STONES + [capstone] + +__all__ = [ + "ALL_STONES", + "DATA_STONES", + "capstone", + "cornerstone", + "keystone", + "lodestone", + "touchstone", +] diff --git a/app/stones/capstone.py b/app/stones/capstone.py new file mode 100644 index 0000000000000000000000000000000000000000..7980aff02e141be18350dece8918dcc88ee0bbe6 --- /dev/null +++ b/app/stones/capstone.py @@ -0,0 +1,47 @@ +"""Capstone — the Synthesiser. + +Granite 4.1 (8B) writes the cited briefing under Mellea-validated +rejection sampling. Every numeric claim is anchored to a `[doc_id]` +citation pointing back into one of the four data-Stones; sentences +that fail the four grounding checks (`numerics_grounded`, +`no_placeholder_tokens`, `citations_dense`, `citations_resolve`) are +rolled with surgical feedback until the budget is exhausted. + +This module is a thin alias around `app.reconcile` — the working code +stays in `app/reconcile.py` for git-blame continuity. The naming is in +the user-facing trace and the README. +""" +from __future__ import annotations + +from typing import Any + +from app import reconcile as _reconcile + +NAME = "Capstone" +TAGLINE = "The Synthesiser" +DESCRIPTION = ( + "Writes the cited briefing — Granite 4.1 + Mellea rejection sampling." +) + +# Capstone consumes everything the four data-Stones produced; we don't +# enumerate state keys here because the reconciler reads the FSM state +# directly and `app/reconcile.py:build_documents()` is the source of +# truth for which keys it touches. +SOURCES: list[str] = [] + +# Re-export the reconciler entrypoints under the Stone name so callers +# can write `from app.stones import capstone; capstone.run(state)`. +build_documents = _reconcile.build_documents +trim_docs_to_plan = _reconcile.trim_docs_to_plan +verify_paragraph = _reconcile.verify_paragraph +run = _reconcile.reconcile +EXTRA_SYSTEM_PROMPT = _reconcile.EXTRA_SYSTEM_PROMPT + + +def collect(state: dict[str, Any]) -> dict[str, Any]: + """Return the Capstone's outputs from the state dict (for the trace).""" + out: dict[str, Any] = {} + for k in ("paragraph", "audit", "mellea"): + if state.get(k) is not None: + out[k] = state[k] + return out diff --git a/app/stones/cornerstone.py b/app/stones/cornerstone.py new file mode 100644 index 0000000000000000000000000000000000000000..51ad3a65f0df9585f740d816c29ca174116fa74e --- /dev/null +++ b/app/stones/cornerstone.py @@ -0,0 +1,38 @@ +"""Cornerstone — the Hazard Reader. + +Reads what NYC's ground remembers about flooding: empirical 2012 Sandy +extent, modelled DEP scenarios, 2021 Ida USGS high-water marks, baked +Prithvi-EO Ida-attributable polygons, and LiDAR-derived microtopography +(elevation / HAND / TWI). + +These are static records — they don't change between queries. They +ground the briefing in what already happened or has already been +modelled, and serve as the empirical anchor for everything the live +sensors and forecasts report. +""" +from __future__ import annotations + +from typing import Any + +NAME = "Cornerstone" +TAGLINE = "The Hazard Reader" +DESCRIPTION = "Reads what NYC's ground remembers about flooding." 
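+ +# A minimal sketch of the contract every Stone shares. The state values +# here are hypothetical; collect(), defined below, simply drops sources +# that did not fire: +# +# >>> collect({"sandy": {"inside": True}, "dep": None}) +# {'sandy': {'inside': True}}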
+ +# FSM state keys this Stone aggregates. The order here mirrors the order +# documents are emitted into the reconciler prompt today. +SOURCES = [ + "sandy", # step_sandy — 2012 Sandy inundation extent + "dep", # step_dep — NYC DEP stormwater scenarios + "ida_hwm", # step_ida_hwm — USGS Ida 2021 high-water marks + "prithvi_water", # step_prithvi — baked Prithvi-EO Ida polygons + "microtopo", # step_microtopo — USGS 3DEP DEM + HAND/TWI +] + + +def collect(state: dict[str, Any]) -> dict[str, Any]: + """Return {state_key: value} for every Cornerstone source that fired. + + Drops keys whose value is None (the silence-over-confabulation + contract — specialists that didn't fire emit nothing). + """ + return {k: state[k] for k in SOURCES if state.get(k) is not None} diff --git a/app/stones/keystone.py b/app/stones/keystone.py new file mode 100644 index 0000000000000000000000000000000000000000..4a4b61c2f09dd858709df7335f890a9d7949144b --- /dev/null +++ b/app/stones/keystone.py @@ -0,0 +1,35 @@ +"""Keystone — the Asset Register. + +Counts what the city has built on top of those hazards: subway +entrances, NYCHA developments, DOE schools, NYS DOH hospitals, and +(via the TerraMind-NYC-Buildings adapter, fine-tuned on NYC building +footprints on AMD MI300X) the building stock visible in current EO. + +These are the public-asset registers — the per-address briefing +quantifies how many of each asset class fall inside the hazard +footprints the Cornerstone established. +""" +from __future__ import annotations + +from typing import Any + +NAME = "Keystone" +TAGLINE = "The Asset Register" +DESCRIPTION = "Counts the public assets and built fabric exposed to the hazards." + +# Existing register specialists + the new TerraMind-Buildings tool +# (added in commit 4 of the Stones migration). The Stones layer is +# tolerant of state keys that don't exist yet — `collect` skips +# anything absent. +SOURCES = [ + "mta_entrances", # step_mta_entrances — MTA entrance exposure + "nycha_developments", # step_nycha — NYCHA exposure + "doe_schools", # step_doe_schools — DOE schools exposure + "doh_hospitals", # step_doh_hospitals — NYS DOH hospitals + "terramind_buildings", # step_terramind_buildings (commit 4) — NYC LoRA +] + + +def collect(state: dict[str, Any]) -> dict[str, Any]: + """Return {state_key: value} for every Keystone source that fired.""" + return {k: state[k] for k in SOURCES if state.get(k) is not None} diff --git a/app/stones/lodestone.py b/app/stones/lodestone.py new file mode 100644 index 0000000000000000000000000000000000000000..12bbbe9b7e572f51e26dc75bcefd43e70207ccc0 --- /dev/null +++ b/app/stones/lodestone.py @@ -0,0 +1,34 @@ +"""Lodestone — the Projector. + +Projects what's coming next: NWS active flood-relevant alerts (the +National Weather Service's authoritative short-horizon watches / +warnings); Granite TimeSeries TTM r2 zero-shot forecasts of the Battery +surge residual, per-address NYC 311 complaint rates, and per-sensor +FloodNet event recurrence; and (via the Granite-TTM-r2-Battery-Surge +fine-tune on AMD MI300X) a 96-hour surge nowcast. + +The Lodestone is the forward-looking Stone — every cited number here +is a forecast, framed as such in the briefing. +""" +from __future__ import annotations + +from typing import Any + +NAME = "Lodestone" +TAGLINE = "The Projector" +DESCRIPTION = "Projects what's coming: alerts, surge, and recurrence forecasts." + +# Existing forecast specialists + the new fine-tuned Battery surge +# nowcast (added in commit 6).
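+# +# A hedged sketch (not project code) of how a consumer might walk the +# data-Stones to build per-Stone prompt sections. DATA_STONES, NAME, and +# collect() are the shared shape from app/stones/__init__.py; the state +# dict is whatever the FSM produced: +# +# from app.stones import DATA_STONES +# sections = {s.NAME: s.collect(state) for s in DATA_STONES}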
+SOURCES = [ + "nws_alerts", # step_nws_alerts — NWS public alerts + "ttm_forecast", # step_ttm_forecast — TTM r2 Battery zero-shot + "ttm_311_forecast", # step_ttm_311_forecast — TTM r2 311 weekly + "floodnet_forecast", # step_floodnet_forecast — TTM r2 FloodNet recurrence + "ttm_battery_surge", # step_ttm_battery_surge (commit 6) — fine-tuned +] + + +def collect(state: dict[str, Any]) -> dict[str, Any]: + """Return {state_key: value} for every Lodestone source that fired.""" + return {k: state[k] for k in SOURCES if state.get(k) is not None} diff --git a/app/stones/touchstone.py b/app/stones/touchstone.py new file mode 100644 index 0000000000000000000000000000000000000000..4a44a6ab2488daa168b6ee7693ebfafcd56f66e7 --- /dev/null +++ b/app/stones/touchstone.py @@ -0,0 +1,35 @@ +"""Touchstone — the Live Observer. + +Watches what's happening right now: FloodNet ultrasonic depth sensors, +NYC 311 flood-complaint history, NWS hourly METAR observations, NOAA +tide-gauge water levels, and per-query EO segmentation +(Prithvi-EO 2.0 NYC Pluvial fine-tune for water/flood; TerraMind-NYC +LULC adapter for current land cover). + +The Touchstone is the "current state of the world" Stone. Its outputs +change minute to minute and are explicitly framed in the briefing as +right-now context, not historical record. +""" +from __future__ import annotations + +from typing import Any + +NAME = "Touchstone" +TAGLINE = "The Live Observer" +DESCRIPTION = "Watches the current state of the city's flood signals and EO." + +# Live sensors + per-query EO. `prithvi_live` becomes the NYC Pluvial +# v2 fine-tune in commit 5; `terramind_lulc` is added in commit 4. +SOURCES = [ + "floodnet", # step_floodnet — FloodNet sensor network + "nyc311", # step_311 — NYC 311 flood complaints + "nws_obs", # step_nws_obs — NWS hourly METAR obs + "noaa_tides", # step_noaa_tides — NOAA tide gauge water level + "prithvi_live", # step_prithvi_live — Prithvi-EO 2.0 (v2 in commit 5) + "terramind_lulc", # step_terramind_lulc (commit 4) — NYC LULC adapter +] + + +def collect(state: dict[str, Any]) -> dict[str, Any]: + """Return {state_key: value} for every Touchstone source that fired.""" + return {k: state[k] for k in SOURCES if state.get(k) is not None} diff --git a/assets/logo-blue.svg b/assets/logo-blue.svg new file mode 100644 index 0000000000000000000000000000000000000000..498374c82b942250adfc4480374c1e52d29b36ea --- /dev/null +++ b/assets/logo-blue.svg @@ -0,0 +1,14 @@ + + + + + + + + + + + diff --git a/assets/logo-paper.png b/assets/logo-paper.png new file mode 100644 index 0000000000000000000000000000000000000000..368c9f5af8e5dc094195571d9c6e8d02ffe92cdd Binary files /dev/null and b/assets/logo-paper.png differ diff --git a/assets/logo-paper.svg b/assets/logo-paper.svg new file mode 100644 index 0000000000000000000000000000000000000000..106e00fc928817e0c1b18bf396d88cc7aa17bfe2 --- /dev/null +++ b/assets/logo-paper.svg @@ -0,0 +1,13 @@ + + + + + + + + + + + diff --git a/assets/logo.png b/assets/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..a4fadbe5e938a4f6b9d35c9f5a61ba8f97ed5685 Binary files /dev/null and b/assets/logo.png differ diff --git a/assets/logo.svg b/assets/logo.svg new file mode 100644 index 0000000000000000000000000000000000000000..5e96bac27088942889b25ef3a93cca73eef5c756 --- /dev/null +++ b/assets/logo.svg @@ -0,0 +1,22 @@ + + + + + + + + + + + diff --git a/corpus/comptroller_rain_2024.pdf b/corpus/comptroller_rain_2024.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..7bf01a9a571f005c62ad8080ffd3f5ce47e454e4 --- /dev/null +++ b/corpus/comptroller_rain_2024.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c4ad386a3eaffc2278a44013f852c2ddd1bf06e278346227e24615ee3a387fc +size 2616885 diff --git a/corpus/coned_22_e_0222.pdf b/corpus/coned_22_e_0222.pdf new file mode 100644 index 0000000000000000000000000000000000000000..740aed3bd8db40545fe584af7cf7cc4aa52e19b1 --- /dev/null +++ b/corpus/coned_22_e_0222.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d76c7f108bd0336cefa5dd6fb5064cf91adee3c9c1b91eeaa279d0dd0fcdb59 +size 5045344 diff --git a/corpus/dep_wastewater_2013.pdf b/corpus/dep_wastewater_2013.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6afd0c65925721db9b33f4087a8451e73d4fa921 --- /dev/null +++ b/corpus/dep_wastewater_2013.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e57a402851521bd61494c572fded8327cd28b35118eb95f7bf7d0bd2bdba32d +size 732738 diff --git a/corpus/mta_resilience_2025.pdf b/corpus/mta_resilience_2025.pdf new file mode 100644 index 0000000000000000000000000000000000000000..21c8a9d6e9f39939bc85f10ce39c9e8b0c98a208 --- /dev/null +++ b/corpus/mta_resilience_2025.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d7e3e7c6634adbfc00ee80f7f5bfe1a2d9c46ce0d1321363d3b3bb3446a2582 +size 8455213 diff --git a/corpus/nycha_lessons.pdf b/corpus/nycha_lessons.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7e52e6a7f1540e8e0b33dfd55d787e765063d017 --- /dev/null +++ b/corpus/nycha_lessons.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d460034b6a786fcf8b0a96aa3da983d9131bfdb521d7967b9d29c1f55e265d8c +size 14226816 diff --git a/data/baked/dep_extreme_2080.tif b/data/baked/dep_extreme_2080.tif new file mode 100644 index 0000000000000000000000000000000000000000..9ff61c2fab845fc9c47a5cc9030ad5613945842e --- /dev/null +++ b/data/baked/dep_extreme_2080.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc0586e57d11e91ebec5dd0413aa81c7258d88e063a4f03d1724cdf15e734ddd +size 3629020 diff --git a/data/baked/dep_moderate_2050.tif b/data/baked/dep_moderate_2050.tif new file mode 100644 index 0000000000000000000000000000000000000000..51359adbb6fed4941a8dbd3988380ea62ef706e4 --- /dev/null +++ b/data/baked/dep_moderate_2050.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9b9d1d6aefe462bb379eb23545f14ef5ac0b77f27d0edbbf0245ee496c0663b +size 1318491 diff --git a/data/baked/dep_moderate_current.tif b/data/baked/dep_moderate_current.tif new file mode 100644 index 0000000000000000000000000000000000000000..bde8bc39971b126ca4e2013ea8d75549549328a2 --- /dev/null +++ b/data/baked/dep_moderate_current.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:651e5cb1a019ef5fee9b2743c58b93ea5ceb4dd3ff1ac62554c03a62dee1c909 +size 917087 diff --git a/data/baked/sandy.tif b/data/baked/sandy.tif new file mode 100644 index 0000000000000000000000000000000000000000..e623640e45de89d7d3f5216a723908ec7b5a7746 --- /dev/null +++ b/data/baked/sandy.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a0dcf61533dfac5018c8bda84f93478dfd51788d62a43717a2529bcc9b8cd6b +size 1240643 diff --git a/data/dep/dep_extreme_2080.gdb/a00000001.TablesByName.atx b/data/dep/dep_extreme_2080.gdb/a00000001.TablesByName.atx new file mode 100644 index 
0000000000000000000000000000000000000000..983d2907125d86caaeb30e849543ff95aa08f74b --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000001.TablesByName.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:000e527ff96249b979cbaae8ad7a22ecf07e7cda9c61f95cb531df2c761e1da5 +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/a00000001.gdbindexes b/data/dep/dep_extreme_2080.gdb/a00000001.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..2cb40e992459d7d96cd3292afe1589bea33baf80 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000001.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80ba95b873e151074244f7655f9e29ce8967aab1bd57ff4c06a329b723a3d46c +size 110 diff --git a/data/dep/dep_extreme_2080.gdb/a00000001.gdbtable b/data/dep/dep_extreme_2080.gdb/a00000001.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..6cab0d5d81c6cb65b99d8f59da66383ecb9f3059 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000001.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6510211b1fb4d2e196327686c973d866c1158192639615e8d7166e6e11c11abf +size 393 diff --git a/data/dep/dep_extreme_2080.gdb/a00000001.gdbtablx b/data/dep/dep_extreme_2080.gdb/a00000001.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..48639c1b4f534813c74462a1eb7c8cc4ed0b9a35 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000001.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b65d6bdd23b973033884654531d2ed75d5c4e55fd260bf23631bbecf9f7edab5 +size 5152 diff --git a/data/dep/dep_extreme_2080.gdb/a00000002.gdbtable b/data/dep/dep_extreme_2080.gdb/a00000002.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..c199f083fbb750e47111dcb081e167ab357578ac --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000002.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9efa50dc62a40e109a546054943ae5ad4b3d41aa81ece0dfa026b7b23cb09da8 +size 2055 diff --git a/data/dep/dep_extreme_2080.gdb/a00000002.gdbtablx b/data/dep/dep_extreme_2080.gdb/a00000002.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..feb01457922b40c5ad69359ce5f12a3fc4cccffb --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000002.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3ac9865dec6ab40e73d91dd1f5c2eef0248e66e359d4abb1d97edba92b5df88 +size 5152 diff --git a/data/dep/dep_extreme_2080.gdb/a00000003.gdbindexes b/data/dep/dep_extreme_2080.gdb/a00000003.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..fcd5228d913fb3146e912510c4f60dcb2d9ef834 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000003.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de0911d18e2f304380c04ae123ea18057d262d788301f7cc3148298231c7c618 +size 42 diff --git a/data/dep/dep_extreme_2080.gdb/a00000003.gdbtable b/data/dep/dep_extreme_2080.gdb/a00000003.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..cfb9b8ec1fff3f23e91b0a8e1f1b283bc6f077a4 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000003.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c8869c8a6d11b5937eaa9b04e91f5e1aedb0835c762692b2518d134e7d71818 +size 1175 diff --git a/data/dep/dep_extreme_2080.gdb/a00000003.gdbtablx b/data/dep/dep_extreme_2080.gdb/a00000003.gdbtablx new file mode 100644 index 
0000000000000000000000000000000000000000..ff5b7a1852667784216ed99d9e1f66291580698e --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000003.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d63fd5f8a490aa6aeffde1336e80ce35b5399af5752a72b316570a670b09d167 +size 5152 diff --git a/data/dep/dep_extreme_2080.gdb/a00000004.CatItemsByPhysicalName.atx b/data/dep/dep_extreme_2080.gdb/a00000004.CatItemsByPhysicalName.atx new file mode 100644 index 0000000000000000000000000000000000000000..80475a3938053be4c0977fc9fb3c4e2707ae646f --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000004.CatItemsByPhysicalName.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a7de6738713b7302931d9efba3ce9a7023e88accad42e3c77de7822a1eae897 +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/a00000004.CatItemsByType.atx b/data/dep/dep_extreme_2080.gdb/a00000004.CatItemsByType.atx new file mode 100644 index 0000000000000000000000000000000000000000..f3d2fdee5a78044688a562dcc6f5fdc24527ffa7 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000004.CatItemsByType.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b22ce45a2251a7b5891c3947f182009a5bebfe77ed68fbc993df7fda7990862 +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/a00000004.FDO_UUID.atx b/data/dep/dep_extreme_2080.gdb/a00000004.FDO_UUID.atx new file mode 100644 index 0000000000000000000000000000000000000000..ec5b2a62ee0bc1926486f176586a3f3d67121ffb --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000004.FDO_UUID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28862222a905695314d2d08e8fe42c160512434248410966b5b55a06a1bf535d +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/a00000004.freelist b/data/dep/dep_extreme_2080.gdb/a00000004.freelist new file mode 100644 index 0000000000000000000000000000000000000000..fd096f84320f8c072cbdcfc2aad3c53c144a7a09 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000004.freelist @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03b92233cf269468bb389dc6eee786735705247e26f1caa75f4f48ffe7175479 +size 24920 diff --git a/data/dep/dep_extreme_2080.gdb/a00000004.gdbindexes b/data/dep/dep_extreme_2080.gdb/a00000004.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..4d522bfcb49b21fdd987c6d51afe3cdff16851cc --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000004.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f29887c93b126a3feb0e20958cd73cc774f76ab79284b9c7286dc12eb05db858 +size 310 diff --git a/data/dep/dep_extreme_2080.gdb/a00000004.gdbtable b/data/dep/dep_extreme_2080.gdb/a00000004.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..090e4deb35797435bd07a631d8293eebc8c1701a --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000004.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afbd7d687b7c8fc271adf154ac927d015eb6d0c828c418dcb778838476f7592b +size 2913622 diff --git a/data/dep/dep_extreme_2080.gdb/a00000004.gdbtablx b/data/dep/dep_extreme_2080.gdb/a00000004.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..377612acbc80cad17b61db2cd8aaa2450025bbe4 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000004.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63c1f33346633d88222c47918aaa13b643153a7265a0350932d631471bcc5b8e +size 5152 diff --git a/data/dep/dep_extreme_2080.gdb/a00000004.horizon 
b/data/dep/dep_extreme_2080.gdb/a00000004.horizon new file mode 100644 index 0000000000000000000000000000000000000000..88b384023ae287c8b4ccb9f52c9d4f83138f0a02 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000004.horizon @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fab34115b351a30fb6adb5818d8f08e4dd3a024b7247dbbc61c56929ab6e223 +size 32 diff --git a/data/dep/dep_extreme_2080.gdb/a00000004.spx b/data/dep/dep_extreme_2080.gdb/a00000004.spx new file mode 100644 index 0000000000000000000000000000000000000000..e374a516eb74584bed1db8a2f0364b8bd4a6e141 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000004.spx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d63feebe0d7b0fb0ba1769e2bbdd6822f8cfef32fa1668832d0ff6180e071e5 +size 45078 diff --git a/data/dep/dep_extreme_2080.gdb/a00000005.CatItemTypesByName.atx b/data/dep/dep_extreme_2080.gdb/a00000005.CatItemTypesByName.atx new file mode 100644 index 0000000000000000000000000000000000000000..e845e87340870356d4a47c01080989cc4df15d7f --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000005.CatItemTypesByName.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e901522888a8153c398e005ac61c7352e3fe920bf2b7a84d76d2b4caeee9817 +size 12310 diff --git a/data/dep/dep_extreme_2080.gdb/a00000005.CatItemTypesByParentTypeID.atx b/data/dep/dep_extreme_2080.gdb/a00000005.CatItemTypesByParentTypeID.atx new file mode 100644 index 0000000000000000000000000000000000000000..2718ced910e30f3ccfc09e6a3362579ba9ab4aae --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000005.CatItemTypesByParentTypeID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8ba35a73b3e10a55caae8ccf875f10ca3288d1c0d18aad51bf52182b322ec0e +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/a00000005.CatItemTypesByUUID.atx b/data/dep/dep_extreme_2080.gdb/a00000005.CatItemTypesByUUID.atx new file mode 100644 index 0000000000000000000000000000000000000000..cb0cb24e0633fa818cef5c78d93060f5e8d525b1 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000005.CatItemTypesByUUID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:480f1f90818c3dc361c6e9ffc09330781a1c55ba52f7cb1a1925b2d791beba5e +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/a00000005.gdbindexes b/data/dep/dep_extreme_2080.gdb/a00000005.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..16d134901f9fc0f82952f5cfc182df6836237c7d --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000005.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c21e650994b48cc72d174b3caf2ac1bd19e2d7305c8087aaae97e24caecf98a5 +size 296 diff --git a/data/dep/dep_extreme_2080.gdb/a00000005.gdbtable b/data/dep/dep_extreme_2080.gdb/a00000005.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..ca7570f4da64276960756f98dcc1b5d67c70b2bf --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000005.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:457b2ef15090d858e7c3e88d41131d1054b241836d2f4cc6d61d082bbf7e6aa3 +size 2071 diff --git a/data/dep/dep_extreme_2080.gdb/a00000005.gdbtablx b/data/dep/dep_extreme_2080.gdb/a00000005.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..db7efb779c0426a701d7187da4b6d5be2b31f2dc --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000005.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:fbe2191ecb633ff3a4c3d449dd4dcd964389c72dc6686aa02f6ce6fcea31cc6f +size 5152 diff --git a/data/dep/dep_extreme_2080.gdb/a00000006.CatRelsByDestinationID.atx b/data/dep/dep_extreme_2080.gdb/a00000006.CatRelsByDestinationID.atx new file mode 100644 index 0000000000000000000000000000000000000000..a657676d939b27913f00603cb741003c2eec4e49 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000006.CatRelsByDestinationID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd7047061f201bf4c5fe902573659335a76ba85eec37a8fd2000daf04fd4f345 +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/a00000006.CatRelsByOriginID.atx b/data/dep/dep_extreme_2080.gdb/a00000006.CatRelsByOriginID.atx new file mode 100644 index 0000000000000000000000000000000000000000..0d096762c31f46c80a63ccf66f134b1a28a2ff83 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000006.CatRelsByOriginID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14fa81cafa6949f01b455bed9717b8be9cc4db6e12b99b52bc74efa819393b63 +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/a00000006.CatRelsByType.atx b/data/dep/dep_extreme_2080.gdb/a00000006.CatRelsByType.atx new file mode 100644 index 0000000000000000000000000000000000000000..0ee3e4343b3ae3c11eb717c6798dea7cbec8590d --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000006.CatRelsByType.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b48641a75f673005c01d96908edfa4313d1fad7ffb4306e9789ab3a97c73974 +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/a00000006.FDO_UUID.atx b/data/dep/dep_extreme_2080.gdb/a00000006.FDO_UUID.atx new file mode 100644 index 0000000000000000000000000000000000000000..37f48cb0ba50175ef48127c5b481c576c429fbce --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000006.FDO_UUID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b3e26d08cb65c16e4b4a141875ba13305dccf26049ae0fb5d3a321d16737461 +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/a00000006.gdbindexes b/data/dep/dep_extreme_2080.gdb/a00000006.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..8dbf1dcde75c5bec68e67f59a4f604865d70bc67 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000006.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1919a00326f198a90dc97545ec448c10b2b90d367020222212f7b00433fe09b5 +size 318 diff --git a/data/dep/dep_extreme_2080.gdb/a00000006.gdbtable b/data/dep/dep_extreme_2080.gdb/a00000006.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..8aa40f5839dd23a6e497c73b37ec3dcba1bcc507 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000006.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4e0630d80a3791aac69bf436e0cb2d74dbde97050577cbb155b0fdd9c5a8696 +size 336 diff --git a/data/dep/dep_extreme_2080.gdb/a00000006.gdbtablx b/data/dep/dep_extreme_2080.gdb/a00000006.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..acc009a83b5c2cfe0952a8b5686e64b755aa5cb4 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000006.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24c4808826c22c92628c9f4d16cc80287e08b7fe2ae820e73a8599d96eacfe97 +size 5152 diff --git a/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByBackwardLabel.atx b/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByBackwardLabel.atx new file mode 100644 index 
0000000000000000000000000000000000000000..cd20e7262134cda019fc4ebd67ba5e74eac6e794 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByBackwardLabel.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d51717d5b9de760d2488edb544637d875f131ad96483b96bf95aecff7f679d5 +size 12310 diff --git a/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByDestItemTypeID.atx b/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByDestItemTypeID.atx new file mode 100644 index 0000000000000000000000000000000000000000..dbcb3fa4bf902ea0d8296b47de5cc8ab81c1dcb6 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByDestItemTypeID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:655a5548325b8adf939e69851f9f52ae3b61244d998df60811169142a1a77648 +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByForwardLabel.atx b/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByForwardLabel.atx new file mode 100644 index 0000000000000000000000000000000000000000..a055d8a0f275c5fa40737c628693a552e1d7e846 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByForwardLabel.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3471d1b80a6cc2ac1a094743a52f72a6dd6bbcca787604cacaa0ba409a9b583 +size 12310 diff --git a/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByName.atx b/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByName.atx new file mode 100644 index 0000000000000000000000000000000000000000..3a23aafca7a030bf250060d009e1a2244e978960 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByName.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e4d57cabb14dc7b1f3510441e0b5ebd94f01e7412f272993b98502eea3d9577 +size 12310 diff --git a/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByOriginItemTypeID.atx b/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByOriginItemTypeID.atx new file mode 100644 index 0000000000000000000000000000000000000000..ca7146e89ee908dd68893f736ee7a5f1f50aeeb9 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByOriginItemTypeID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c7f665eb045df2a910a6800d0919af507bed2a42d2e9547a4004ba235fec5de +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByUUID.atx b/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByUUID.atx new file mode 100644 index 0000000000000000000000000000000000000000..0f695a37c51f948a3d05c431df0c8bd8dfa6f36a --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000007.CatRelTypesByUUID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:209099cc5bdb713a603488740c831b805af7138397499f5d946a2c8aed69d446 +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/a00000007.gdbindexes b/data/dep/dep_extreme_2080.gdb/a00000007.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..d9e21aab9dc1fb2f3598c153db5e7d3008bf5166 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000007.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc98e24d04eddd7f38905d176c1ea43c547a3a2daa0c80091a20d3443c726621 +size 602 diff --git a/data/dep/dep_extreme_2080.gdb/a00000007.gdbtable b/data/dep/dep_extreme_2080.gdb/a00000007.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..178021a866e94c0c2cede66d9b132220ad1fa071 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000007.gdbtable @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6cc24256e330ab468b5c594309b1493c2028fe072c9f13e5fe41b2084ce9a16 +size 3626 diff --git a/data/dep/dep_extreme_2080.gdb/a00000007.gdbtablx b/data/dep/dep_extreme_2080.gdb/a00000007.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..9e13ad03f7ea312986f4082415309ae16b0896b1 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000007.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d81aa3c399da5feba68a24ae6461edfca18cfaa9d81a1f1dab9b8b8a64e590c +size 5152 diff --git a/data/dep/dep_extreme_2080.gdb/a00000009.gdbindexes b/data/dep/dep_extreme_2080.gdb/a00000009.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..411a21b962bc28575095cac36b825dc2740869e8 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000009.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:760bd4864717c60d5e7c0c6461a622062680e6f79f51250f49456aa609b43774 +size 116 diff --git a/data/dep/dep_extreme_2080.gdb/a00000009.gdbtable b/data/dep/dep_extreme_2080.gdb/a00000009.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..360b020a64f1aa78fd2dce98c4012e80da39491c --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000009.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45eb6e68a69bd433a44b508dccb0be91608bf9f786f7eeaadd0e6be9fef26038 +size 30563127 diff --git a/data/dep/dep_extreme_2080.gdb/a00000009.gdbtablx b/data/dep/dep_extreme_2080.gdb/a00000009.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..487743211c7b30b57b082b5f0ec4c214d81214fb --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000009.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c58be6685e716b5b0b03664fe8fa632a2aad9428f7e24145f9014f9ddd62e5c +size 5152 diff --git a/data/dep/dep_extreme_2080.gdb/a00000009.horizon b/data/dep/dep_extreme_2080.gdb/a00000009.horizon new file mode 100644 index 0000000000000000000000000000000000000000..5c53f7e90d4127ee3ce28fc173278972653fa297 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000009.horizon @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73f51c36d09203210a9d16b125c20b9332b23ca525e51515a7984c87392f0f04 +size 32 diff --git a/data/dep/dep_extreme_2080.gdb/a00000009.spx b/data/dep/dep_extreme_2080.gdb/a00000009.spx new file mode 100644 index 0000000000000000000000000000000000000000..0e0b9d61ad20c2e5866034116140f2e3acc13bc9 --- /dev/null +++ b/data/dep/dep_extreme_2080.gdb/a00000009.spx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc1226dafde71b5f0f4974cd65e5bbd5c16f4f580ac617c3446794ae8e859f3a +size 4118 diff --git a/data/dep/dep_extreme_2080.gdb/gdb b/data/dep/dep_extreme_2080.gdb/gdb new file mode 100644 index 0000000000000000000000000000000000000000..a786e127004dd9e94e88fda7742d248237ad8885 Binary files /dev/null and b/data/dep/dep_extreme_2080.gdb/gdb differ diff --git a/data/dep/dep_extreme_2080.gdb/timestamps b/data/dep/dep_extreme_2080.gdb/timestamps new file mode 100644 index 0000000000000000000000000000000000000000..03582fd87d669c04deb7fd9baf5801245c090ebc Binary files /dev/null and b/data/dep/dep_extreme_2080.gdb/timestamps differ diff --git a/data/dep/dep_moderate_2050.gdb/a00000001.TablesByName.atx b/data/dep/dep_moderate_2050.gdb/a00000001.TablesByName.atx new file mode 100644 index 
0000000000000000000000000000000000000000..6018c899a7f40fb5b3057c66b443182b4ac8db81 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000001.TablesByName.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cdcd8324ef696909dcd9f8048a9813f7f22f206d0eb91ae6c945f93dae7869a +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/a00000001.gdbindexes b/data/dep/dep_moderate_2050.gdb/a00000001.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..2cb40e992459d7d96cd3292afe1589bea33baf80 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000001.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80ba95b873e151074244f7655f9e29ce8967aab1bd57ff4c06a329b723a3d46c +size 110 diff --git a/data/dep/dep_moderate_2050.gdb/a00000001.gdbtable b/data/dep/dep_moderate_2050.gdb/a00000001.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..807ab76e0d5b04d2acb9c0a95865bf6f248c69f8 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000001.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f77c737ccf3b6dd8657a307e1f29944c38ce2e13efe4596c5876599b55e0f205 +size 387 diff --git a/data/dep/dep_moderate_2050.gdb/a00000001.gdbtablx b/data/dep/dep_moderate_2050.gdb/a00000001.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..48639c1b4f534813c74462a1eb7c8cc4ed0b9a35 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000001.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b65d6bdd23b973033884654531d2ed75d5c4e55fd260bf23631bbecf9f7edab5 +size 5152 diff --git a/data/dep/dep_moderate_2050.gdb/a00000002.gdbtable b/data/dep/dep_moderate_2050.gdb/a00000002.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..c199f083fbb750e47111dcb081e167ab357578ac --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000002.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9efa50dc62a40e109a546054943ae5ad4b3d41aa81ece0dfa026b7b23cb09da8 +size 2055 diff --git a/data/dep/dep_moderate_2050.gdb/a00000002.gdbtablx b/data/dep/dep_moderate_2050.gdb/a00000002.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..feb01457922b40c5ad69359ce5f12a3fc4cccffb --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000002.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3ac9865dec6ab40e73d91dd1f5c2eef0248e66e359d4abb1d97edba92b5df88 +size 5152 diff --git a/data/dep/dep_moderate_2050.gdb/a00000003.gdbindexes b/data/dep/dep_moderate_2050.gdb/a00000003.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..fcd5228d913fb3146e912510c4f60dcb2d9ef834 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000003.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de0911d18e2f304380c04ae123ea18057d262d788301f7cc3148298231c7c618 +size 42 diff --git a/data/dep/dep_moderate_2050.gdb/a00000003.gdbtable b/data/dep/dep_moderate_2050.gdb/a00000003.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..4e65904751de6a69b638375f1ff4d41f74caf4eb --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000003.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9794e3f13fa8ce570cfd881605dd5187aea108759563fea2e19eb5bcf16f5733 +size 1175 diff --git a/data/dep/dep_moderate_2050.gdb/a00000003.gdbtablx b/data/dep/dep_moderate_2050.gdb/a00000003.gdbtablx new file mode 100644 
index 0000000000000000000000000000000000000000..ff5b7a1852667784216ed99d9e1f66291580698e --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000003.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d63fd5f8a490aa6aeffde1336e80ce35b5399af5752a72b316570a670b09d167 +size 5152 diff --git a/data/dep/dep_moderate_2050.gdb/a00000004.CatItemsByPhysicalName.atx b/data/dep/dep_moderate_2050.gdb/a00000004.CatItemsByPhysicalName.atx new file mode 100644 index 0000000000000000000000000000000000000000..e59ad5cc2d58c63dd8346da78872783e26bd6b6a --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000004.CatItemsByPhysicalName.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab8aa59adc56b6c598f83b7b2b4b5b8848980e1631673eb9f4e1b81bcabf4f3f +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/a00000004.CatItemsByType.atx b/data/dep/dep_moderate_2050.gdb/a00000004.CatItemsByType.atx new file mode 100644 index 0000000000000000000000000000000000000000..89815a826ee09e30e2a6257e6af09f49f6b3f70e --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000004.CatItemsByType.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6aa729e6634424b8507b5f151d6b38fe079b39860c0ec78d954644b9ae1383c +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/a00000004.FDO_UUID.atx b/data/dep/dep_moderate_2050.gdb/a00000004.FDO_UUID.atx new file mode 100644 index 0000000000000000000000000000000000000000..cba49948311e1081181f6566a755c48a1f7128e1 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000004.FDO_UUID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a21b55da6e37a7a553db33d180509aa65ccc6353fece34ec0836c3584b8288e +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/a00000004.freelist b/data/dep/dep_moderate_2050.gdb/a00000004.freelist new file mode 100644 index 0000000000000000000000000000000000000000..81634ab4361fea32cf26fced01b81564356082a9 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000004.freelist @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bba4b65523b3c41382e0ab28abc9279dce271e92768665e60e63f00238c3e72 +size 20824 diff --git a/data/dep/dep_moderate_2050.gdb/a00000004.gdbindexes b/data/dep/dep_moderate_2050.gdb/a00000004.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..4d522bfcb49b21fdd987c6d51afe3cdff16851cc --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000004.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f29887c93b126a3feb0e20958cd73cc774f76ab79284b9c7286dc12eb05db858 +size 310 diff --git a/data/dep/dep_moderate_2050.gdb/a00000004.gdbtable b/data/dep/dep_moderate_2050.gdb/a00000004.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..9acc5dd38f97aa72d3af34a73c3a0e2385e8dc6c --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000004.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f83f7aaaf1f94fd3c45a1ddc00b32c862c748f5e384c5080b1cf550954296a50 +size 1033388 diff --git a/data/dep/dep_moderate_2050.gdb/a00000004.gdbtablx b/data/dep/dep_moderate_2050.gdb/a00000004.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..853a33ab3af6b435f89cc93f8af332e3fb8a767d --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000004.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6abd5fc7474c61b8e209df29b7cff802131bf9d602b5b31b2457e08819db9e34 +size 5152 diff --git 
a/data/dep/dep_moderate_2050.gdb/a00000004.horizon b/data/dep/dep_moderate_2050.gdb/a00000004.horizon new file mode 100644 index 0000000000000000000000000000000000000000..88b384023ae287c8b4ccb9f52c9d4f83138f0a02 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000004.horizon @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fab34115b351a30fb6adb5818d8f08e4dd3a024b7247dbbc61c56929ab6e223 +size 32 diff --git a/data/dep/dep_moderate_2050.gdb/a00000004.spx b/data/dep/dep_moderate_2050.gdb/a00000004.spx new file mode 100644 index 0000000000000000000000000000000000000000..2ff5379dfb50a98451295bf32d371b3347be730b --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000004.spx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d66f878127748888185f7bd574874816ed8f96c1392fa3e0f322c5f6e080e09 +size 40982 diff --git a/data/dep/dep_moderate_2050.gdb/a00000005.CatItemTypesByName.atx b/data/dep/dep_moderate_2050.gdb/a00000005.CatItemTypesByName.atx new file mode 100644 index 0000000000000000000000000000000000000000..e845e87340870356d4a47c01080989cc4df15d7f --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000005.CatItemTypesByName.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e901522888a8153c398e005ac61c7352e3fe920bf2b7a84d76d2b4caeee9817 +size 12310 diff --git a/data/dep/dep_moderate_2050.gdb/a00000005.CatItemTypesByParentTypeID.atx b/data/dep/dep_moderate_2050.gdb/a00000005.CatItemTypesByParentTypeID.atx new file mode 100644 index 0000000000000000000000000000000000000000..2718ced910e30f3ccfc09e6a3362579ba9ab4aae --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000005.CatItemTypesByParentTypeID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8ba35a73b3e10a55caae8ccf875f10ca3288d1c0d18aad51bf52182b322ec0e +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/a00000005.CatItemTypesByUUID.atx b/data/dep/dep_moderate_2050.gdb/a00000005.CatItemTypesByUUID.atx new file mode 100644 index 0000000000000000000000000000000000000000..cb0cb24e0633fa818cef5c78d93060f5e8d525b1 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000005.CatItemTypesByUUID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:480f1f90818c3dc361c6e9ffc09330781a1c55ba52f7cb1a1925b2d791beba5e +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/a00000005.gdbindexes b/data/dep/dep_moderate_2050.gdb/a00000005.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..16d134901f9fc0f82952f5cfc182df6836237c7d --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000005.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c21e650994b48cc72d174b3caf2ac1bd19e2d7305c8087aaae97e24caecf98a5 +size 296 diff --git a/data/dep/dep_moderate_2050.gdb/a00000005.gdbtable b/data/dep/dep_moderate_2050.gdb/a00000005.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..ca7570f4da64276960756f98dcc1b5d67c70b2bf --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000005.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:457b2ef15090d858e7c3e88d41131d1054b241836d2f4cc6d61d082bbf7e6aa3 +size 2071 diff --git a/data/dep/dep_moderate_2050.gdb/a00000005.gdbtablx b/data/dep/dep_moderate_2050.gdb/a00000005.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..db7efb779c0426a701d7187da4b6d5be2b31f2dc --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000005.gdbtablx @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:fbe2191ecb633ff3a4c3d449dd4dcd964389c72dc6686aa02f6ce6fcea31cc6f +size 5152 diff --git a/data/dep/dep_moderate_2050.gdb/a00000006.CatRelsByDestinationID.atx b/data/dep/dep_moderate_2050.gdb/a00000006.CatRelsByDestinationID.atx new file mode 100644 index 0000000000000000000000000000000000000000..f7f602555719f42969af7b063a1c0a065fced438 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000006.CatRelsByDestinationID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5248ea88cc6198dd2fd3d096e28010cb2d41bb950a00d1b33854b2252fc26242 +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/a00000006.CatRelsByOriginID.atx b/data/dep/dep_moderate_2050.gdb/a00000006.CatRelsByOriginID.atx new file mode 100644 index 0000000000000000000000000000000000000000..a3429cd89d9e7fd3940cd1f45224ce414d0d1696 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000006.CatRelsByOriginID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa63315603071bf9e23a32ef91e603f0517f6797c4c6e3ef17e939e01043e39d +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/a00000006.CatRelsByType.atx b/data/dep/dep_moderate_2050.gdb/a00000006.CatRelsByType.atx new file mode 100644 index 0000000000000000000000000000000000000000..0ee3e4343b3ae3c11eb717c6798dea7cbec8590d --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000006.CatRelsByType.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b48641a75f673005c01d96908edfa4313d1fad7ffb4306e9789ab3a97c73974 +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/a00000006.FDO_UUID.atx b/data/dep/dep_moderate_2050.gdb/a00000006.FDO_UUID.atx new file mode 100644 index 0000000000000000000000000000000000000000..610c5ead23161a8b8883023b0fe4562856ae6a06 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000006.FDO_UUID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23599b7309996f38624b41cea9d48110a9121b94e4d92f616db1fb8c6bb6b992 +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/a00000006.gdbindexes b/data/dep/dep_moderate_2050.gdb/a00000006.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..8dbf1dcde75c5bec68e67f59a4f604865d70bc67 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000006.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1919a00326f198a90dc97545ec448c10b2b90d367020222212f7b00433fe09b5 +size 318 diff --git a/data/dep/dep_moderate_2050.gdb/a00000006.gdbtable b/data/dep/dep_moderate_2050.gdb/a00000006.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..9a2c9793c6d350791454baf262d2d4cc2d9dcaf3 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000006.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55f5eb61f7c6c7cdc275430313c982f263a5551b98eaf5d1ae35ca9493a20cf1 +size 336 diff --git a/data/dep/dep_moderate_2050.gdb/a00000006.gdbtablx b/data/dep/dep_moderate_2050.gdb/a00000006.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..acc009a83b5c2cfe0952a8b5686e64b755aa5cb4 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000006.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24c4808826c22c92628c9f4d16cc80287e08b7fe2ae820e73a8599d96eacfe97 +size 5152 diff --git a/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByBackwardLabel.atx b/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByBackwardLabel.atx new file mode 100644 index 
0000000000000000000000000000000000000000..cd20e7262134cda019fc4ebd67ba5e74eac6e794 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByBackwardLabel.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d51717d5b9de760d2488edb544637d875f131ad96483b96bf95aecff7f679d5 +size 12310 diff --git a/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByDestItemTypeID.atx b/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByDestItemTypeID.atx new file mode 100644 index 0000000000000000000000000000000000000000..dbcb3fa4bf902ea0d8296b47de5cc8ab81c1dcb6 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByDestItemTypeID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:655a5548325b8adf939e69851f9f52ae3b61244d998df60811169142a1a77648 +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByForwardLabel.atx b/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByForwardLabel.atx new file mode 100644 index 0000000000000000000000000000000000000000..a055d8a0f275c5fa40737c628693a552e1d7e846 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByForwardLabel.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3471d1b80a6cc2ac1a094743a52f72a6dd6bbcca787604cacaa0ba409a9b583 +size 12310 diff --git a/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByName.atx b/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByName.atx new file mode 100644 index 0000000000000000000000000000000000000000..3a23aafca7a030bf250060d009e1a2244e978960 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByName.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e4d57cabb14dc7b1f3510441e0b5ebd94f01e7412f272993b98502eea3d9577 +size 12310 diff --git a/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByOriginItemTypeID.atx b/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByOriginItemTypeID.atx new file mode 100644 index 0000000000000000000000000000000000000000..ca7146e89ee908dd68893f736ee7a5f1f50aeeb9 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByOriginItemTypeID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c7f665eb045df2a910a6800d0919af507bed2a42d2e9547a4004ba235fec5de +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByUUID.atx b/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByUUID.atx new file mode 100644 index 0000000000000000000000000000000000000000..0f695a37c51f948a3d05c431df0c8bd8dfa6f36a --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000007.CatRelTypesByUUID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:209099cc5bdb713a603488740c831b805af7138397499f5d946a2c8aed69d446 +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/a00000007.gdbindexes b/data/dep/dep_moderate_2050.gdb/a00000007.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..d9e21aab9dc1fb2f3598c153db5e7d3008bf5166 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000007.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc98e24d04eddd7f38905d176c1ea43c547a3a2daa0c80091a20d3443c726621 +size 602 diff --git a/data/dep/dep_moderate_2050.gdb/a00000007.gdbtable b/data/dep/dep_moderate_2050.gdb/a00000007.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..178021a866e94c0c2cede66d9b132220ad1fa071 --- /dev/null +++ 
b/data/dep/dep_moderate_2050.gdb/a00000007.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6cc24256e330ab468b5c594309b1493c2028fe072c9f13e5fe41b2084ce9a16 +size 3626 diff --git a/data/dep/dep_moderate_2050.gdb/a00000007.gdbtablx b/data/dep/dep_moderate_2050.gdb/a00000007.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..9e13ad03f7ea312986f4082415309ae16b0896b1 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000007.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d81aa3c399da5feba68a24ae6461edfca18cfaa9d81a1f1dab9b8b8a64e590c +size 5152 diff --git a/data/dep/dep_moderate_2050.gdb/a00000009.gdbindexes b/data/dep/dep_moderate_2050.gdb/a00000009.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..411a21b962bc28575095cac36b825dc2740869e8 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000009.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:760bd4864717c60d5e7c0c6461a622062680e6f79f51250f49456aa609b43774 +size 116 diff --git a/data/dep/dep_moderate_2050.gdb/a00000009.gdbtable b/data/dep/dep_moderate_2050.gdb/a00000009.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..30f667cac9c82eba7f7a9a5436cb50dd3bc933cb --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000009.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70d96cf80b5113e029a9e8ef46329a93f02731cb522e5a0ecbe910f1b4b17be4 +size 6778053 diff --git a/data/dep/dep_moderate_2050.gdb/a00000009.gdbtablx b/data/dep/dep_moderate_2050.gdb/a00000009.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..80fff7bafc841237e6eac2a6452509e7c6d80a36 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000009.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:675f950808613f4ef964e7ff52204b7898ea68112cec573994ff3db36fa15027 +size 5152 diff --git a/data/dep/dep_moderate_2050.gdb/a00000009.horizon b/data/dep/dep_moderate_2050.gdb/a00000009.horizon new file mode 100644 index 0000000000000000000000000000000000000000..11fc0ef0dc12288a1bd0361d2b73bf5e6d902525 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000009.horizon @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e290e7febb799290097cdf01bb373517c3b3404b4e1ec1e34bc4c5e3ad7361b +size 32 diff --git a/data/dep/dep_moderate_2050.gdb/a00000009.spx b/data/dep/dep_moderate_2050.gdb/a00000009.spx new file mode 100644 index 0000000000000000000000000000000000000000..0e0b9d61ad20c2e5866034116140f2e3acc13bc9 --- /dev/null +++ b/data/dep/dep_moderate_2050.gdb/a00000009.spx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc1226dafde71b5f0f4974cd65e5bbd5c16f4f580ac617c3446794ae8e859f3a +size 4118 diff --git a/data/dep/dep_moderate_2050.gdb/gdb b/data/dep/dep_moderate_2050.gdb/gdb new file mode 100644 index 0000000000000000000000000000000000000000..a786e127004dd9e94e88fda7742d248237ad8885 Binary files /dev/null and b/data/dep/dep_moderate_2050.gdb/gdb differ diff --git a/data/dep/dep_moderate_2050.gdb/timestamps b/data/dep/dep_moderate_2050.gdb/timestamps new file mode 100644 index 0000000000000000000000000000000000000000..8d11aa6e96404534bd5d13eeecdf2846a13b92c0 Binary files /dev/null and b/data/dep/dep_moderate_2050.gdb/timestamps differ diff --git a/data/dep/dep_moderate_current.gdb/a00000001.TablesByName.atx b/data/dep/dep_moderate_current.gdb/a00000001.TablesByName.atx new file mode 
100644 index 0000000000000000000000000000000000000000..2d78f7c872a83ced3ec820bb31869647347dbb58 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000001.TablesByName.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb08c35ec341de84dda9f7bf740de425e0908994cd1153d4ec1e6761c9f1fe64 +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/a00000001.gdbindexes b/data/dep/dep_moderate_current.gdb/a00000001.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..2cb40e992459d7d96cd3292afe1589bea33baf80 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000001.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80ba95b873e151074244f7655f9e29ce8967aab1bd57ff4c06a329b723a3d46c +size 110 diff --git a/data/dep/dep_moderate_current.gdb/a00000001.gdbtable b/data/dep/dep_moderate_current.gdb/a00000001.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..ac8f331ec97b88a89b5991236a094530bf901813 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000001.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42efca4e0536ca5e6f92370aeec767bee0db49f8ad80a4ad3319c4929569e2fd +size 393 diff --git a/data/dep/dep_moderate_current.gdb/a00000001.gdbtablx b/data/dep/dep_moderate_current.gdb/a00000001.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..48639c1b4f534813c74462a1eb7c8cc4ed0b9a35 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000001.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b65d6bdd23b973033884654531d2ed75d5c4e55fd260bf23631bbecf9f7edab5 +size 5152 diff --git a/data/dep/dep_moderate_current.gdb/a00000002.gdbtable b/data/dep/dep_moderate_current.gdb/a00000002.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..c199f083fbb750e47111dcb081e167ab357578ac --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000002.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9efa50dc62a40e109a546054943ae5ad4b3d41aa81ece0dfa026b7b23cb09da8 +size 2055 diff --git a/data/dep/dep_moderate_current.gdb/a00000002.gdbtablx b/data/dep/dep_moderate_current.gdb/a00000002.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..feb01457922b40c5ad69359ce5f12a3fc4cccffb --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000002.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3ac9865dec6ab40e73d91dd1f5c2eef0248e66e359d4abb1d97edba92b5df88 +size 5152 diff --git a/data/dep/dep_moderate_current.gdb/a00000003.gdbindexes b/data/dep/dep_moderate_current.gdb/a00000003.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..fcd5228d913fb3146e912510c4f60dcb2d9ef834 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000003.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de0911d18e2f304380c04ae123ea18057d262d788301f7cc3148298231c7c618 +size 42 diff --git a/data/dep/dep_moderate_current.gdb/a00000003.gdbtable b/data/dep/dep_moderate_current.gdb/a00000003.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..cfb9b8ec1fff3f23e91b0a8e1f1b283bc6f077a4 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000003.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c8869c8a6d11b5937eaa9b04e91f5e1aedb0835c762692b2518d134e7d71818 +size 1175 diff --git 
a/data/dep/dep_moderate_current.gdb/a00000003.gdbtablx b/data/dep/dep_moderate_current.gdb/a00000003.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..ff5b7a1852667784216ed99d9e1f66291580698e --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000003.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d63fd5f8a490aa6aeffde1336e80ce35b5399af5752a72b316570a670b09d167 +size 5152 diff --git a/data/dep/dep_moderate_current.gdb/a00000004.CatItemsByPhysicalName.atx b/data/dep/dep_moderate_current.gdb/a00000004.CatItemsByPhysicalName.atx new file mode 100644 index 0000000000000000000000000000000000000000..a0880cd7a5702d08957db7e01f7b254aec0b0f0e --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000004.CatItemsByPhysicalName.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f48b861462b9da9fd61628ad705036628d8cbf50f4ad794f9872411848049ce3 +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/a00000004.CatItemsByType.atx b/data/dep/dep_moderate_current.gdb/a00000004.CatItemsByType.atx new file mode 100644 index 0000000000000000000000000000000000000000..89815a826ee09e30e2a6257e6af09f49f6b3f70e --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000004.CatItemsByType.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6aa729e6634424b8507b5f151d6b38fe079b39860c0ec78d954644b9ae1383c +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/a00000004.FDO_UUID.atx b/data/dep/dep_moderate_current.gdb/a00000004.FDO_UUID.atx new file mode 100644 index 0000000000000000000000000000000000000000..163e1d6b1b296a8a4ce4be97f28ff20738db274d --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000004.FDO_UUID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:583fea0a2b4893d9cdd6154399a1e6f050c16418ecc21c07df8dd106489d418c +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/a00000004.freelist b/data/dep/dep_moderate_current.gdb/a00000004.freelist new file mode 100644 index 0000000000000000000000000000000000000000..218ac6abed660a53d06e20b7130f05093d4e0135 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000004.freelist @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f533624229491dca59b7e104d95911041eeeaba3672a03923f9bc88134f6ad2b +size 12632 diff --git a/data/dep/dep_moderate_current.gdb/a00000004.gdbindexes b/data/dep/dep_moderate_current.gdb/a00000004.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..4d522bfcb49b21fdd987c6d51afe3cdff16851cc --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000004.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f29887c93b126a3feb0e20958cd73cc774f76ab79284b9c7286dc12eb05db858 +size 310 diff --git a/data/dep/dep_moderate_current.gdb/a00000004.gdbtable b/data/dep/dep_moderate_current.gdb/a00000004.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..0481806c432fd94261b4f21510e8afdd53b544d5 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000004.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0968af9176781d11754896026b0645fedd4475827ac3f91919da59007f5e70a +size 746073 diff --git a/data/dep/dep_moderate_current.gdb/a00000004.gdbtablx b/data/dep/dep_moderate_current.gdb/a00000004.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..7da1449b09523b90949c31aef67133b306af2dd7 --- /dev/null +++ 
b/data/dep/dep_moderate_current.gdb/a00000004.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6bc2797c6c338c5826f9ca99eaae5407876d9a84fc63299d4b1192ddcc497a7 +size 5152 diff --git a/data/dep/dep_moderate_current.gdb/a00000004.horizon b/data/dep/dep_moderate_current.gdb/a00000004.horizon new file mode 100644 index 0000000000000000000000000000000000000000..88b384023ae287c8b4ccb9f52c9d4f83138f0a02 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000004.horizon @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fab34115b351a30fb6adb5818d8f08e4dd3a024b7247dbbc61c56929ab6e223 +size 32 diff --git a/data/dep/dep_moderate_current.gdb/a00000004.spx b/data/dep/dep_moderate_current.gdb/a00000004.spx new file mode 100644 index 0000000000000000000000000000000000000000..2ff5379dfb50a98451295bf32d371b3347be730b --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000004.spx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d66f878127748888185f7bd574874816ed8f96c1392fa3e0f322c5f6e080e09 +size 40982 diff --git a/data/dep/dep_moderate_current.gdb/a00000005.CatItemTypesByName.atx b/data/dep/dep_moderate_current.gdb/a00000005.CatItemTypesByName.atx new file mode 100644 index 0000000000000000000000000000000000000000..e845e87340870356d4a47c01080989cc4df15d7f --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000005.CatItemTypesByName.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e901522888a8153c398e005ac61c7352e3fe920bf2b7a84d76d2b4caeee9817 +size 12310 diff --git a/data/dep/dep_moderate_current.gdb/a00000005.CatItemTypesByParentTypeID.atx b/data/dep/dep_moderate_current.gdb/a00000005.CatItemTypesByParentTypeID.atx new file mode 100644 index 0000000000000000000000000000000000000000..2718ced910e30f3ccfc09e6a3362579ba9ab4aae --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000005.CatItemTypesByParentTypeID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8ba35a73b3e10a55caae8ccf875f10ca3288d1c0d18aad51bf52182b322ec0e +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/a00000005.CatItemTypesByUUID.atx b/data/dep/dep_moderate_current.gdb/a00000005.CatItemTypesByUUID.atx new file mode 100644 index 0000000000000000000000000000000000000000..cb0cb24e0633fa818cef5c78d93060f5e8d525b1 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000005.CatItemTypesByUUID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:480f1f90818c3dc361c6e9ffc09330781a1c55ba52f7cb1a1925b2d791beba5e +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/a00000005.gdbindexes b/data/dep/dep_moderate_current.gdb/a00000005.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..16d134901f9fc0f82952f5cfc182df6836237c7d --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000005.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c21e650994b48cc72d174b3caf2ac1bd19e2d7305c8087aaae97e24caecf98a5 +size 296 diff --git a/data/dep/dep_moderate_current.gdb/a00000005.gdbtable b/data/dep/dep_moderate_current.gdb/a00000005.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..ca7570f4da64276960756f98dcc1b5d67c70b2bf --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000005.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:457b2ef15090d858e7c3e88d41131d1054b241836d2f4cc6d61d082bbf7e6aa3 +size 2071 diff --git 
a/data/dep/dep_moderate_current.gdb/a00000005.gdbtablx b/data/dep/dep_moderate_current.gdb/a00000005.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..db7efb779c0426a701d7187da4b6d5be2b31f2dc --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000005.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbe2191ecb633ff3a4c3d449dd4dcd964389c72dc6686aa02f6ce6fcea31cc6f +size 5152 diff --git a/data/dep/dep_moderate_current.gdb/a00000006.CatRelsByDestinationID.atx b/data/dep/dep_moderate_current.gdb/a00000006.CatRelsByDestinationID.atx new file mode 100644 index 0000000000000000000000000000000000000000..54027335e30cd7cf4e691bafd0e6b7a47971b9af --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000006.CatRelsByDestinationID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f248581df38251f0b8260983c50595e5434cd0a976a4f182d25fd1b61fac2844 +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/a00000006.CatRelsByOriginID.atx b/data/dep/dep_moderate_current.gdb/a00000006.CatRelsByOriginID.atx new file mode 100644 index 0000000000000000000000000000000000000000..4ace00fb59d1b8cd640ef24060fa7c6d1a4acd78 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000006.CatRelsByOriginID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1b6d7caae2687fbe27c91d9f4ac69d5879235ffe6e9f2f008c4f0dc6f5f7634 +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/a00000006.CatRelsByType.atx b/data/dep/dep_moderate_current.gdb/a00000006.CatRelsByType.atx new file mode 100644 index 0000000000000000000000000000000000000000..0ee3e4343b3ae3c11eb717c6798dea7cbec8590d --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000006.CatRelsByType.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b48641a75f673005c01d96908edfa4313d1fad7ffb4306e9789ab3a97c73974 +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/a00000006.FDO_UUID.atx b/data/dep/dep_moderate_current.gdb/a00000006.FDO_UUID.atx new file mode 100644 index 0000000000000000000000000000000000000000..79269f07de24f7ab44c5ac6289bdecaceb4d3d0c --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000006.FDO_UUID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a379d7db1c975b59ae0b74ca3e3c60fa1575a1f8fbe2c5709f5aa5f54696e45 +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/a00000006.gdbindexes b/data/dep/dep_moderate_current.gdb/a00000006.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..8dbf1dcde75c5bec68e67f59a4f604865d70bc67 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000006.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1919a00326f198a90dc97545ec448c10b2b90d367020222212f7b00433fe09b5 +size 318 diff --git a/data/dep/dep_moderate_current.gdb/a00000006.gdbtable b/data/dep/dep_moderate_current.gdb/a00000006.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..79a0595f714c5724e1edf6dddfd7abca1c785f4b --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000006.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6585e2eead7c35134b9943fa41691e99f1020040997e43e4ceb89d59360a842e +size 336 diff --git a/data/dep/dep_moderate_current.gdb/a00000006.gdbtablx b/data/dep/dep_moderate_current.gdb/a00000006.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..acc009a83b5c2cfe0952a8b5686e64b755aa5cb4 --- /dev/null +++ 
b/data/dep/dep_moderate_current.gdb/a00000006.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24c4808826c22c92628c9f4d16cc80287e08b7fe2ae820e73a8599d96eacfe97 +size 5152 diff --git a/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByBackwardLabel.atx b/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByBackwardLabel.atx new file mode 100644 index 0000000000000000000000000000000000000000..cd20e7262134cda019fc4ebd67ba5e74eac6e794 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByBackwardLabel.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d51717d5b9de760d2488edb544637d875f131ad96483b96bf95aecff7f679d5 +size 12310 diff --git a/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByDestItemTypeID.atx b/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByDestItemTypeID.atx new file mode 100644 index 0000000000000000000000000000000000000000..dbcb3fa4bf902ea0d8296b47de5cc8ab81c1dcb6 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByDestItemTypeID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:655a5548325b8adf939e69851f9f52ae3b61244d998df60811169142a1a77648 +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByForwardLabel.atx b/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByForwardLabel.atx new file mode 100644 index 0000000000000000000000000000000000000000..a055d8a0f275c5fa40737c628693a552e1d7e846 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByForwardLabel.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3471d1b80a6cc2ac1a094743a52f72a6dd6bbcca787604cacaa0ba409a9b583 +size 12310 diff --git a/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByName.atx b/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByName.atx new file mode 100644 index 0000000000000000000000000000000000000000..3a23aafca7a030bf250060d009e1a2244e978960 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByName.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e4d57cabb14dc7b1f3510441e0b5ebd94f01e7412f272993b98502eea3d9577 +size 12310 diff --git a/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByOriginItemTypeID.atx b/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByOriginItemTypeID.atx new file mode 100644 index 0000000000000000000000000000000000000000..ca7146e89ee908dd68893f736ee7a5f1f50aeeb9 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByOriginItemTypeID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c7f665eb045df2a910a6800d0919af507bed2a42d2e9547a4004ba235fec5de +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByUUID.atx b/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByUUID.atx new file mode 100644 index 0000000000000000000000000000000000000000..0f695a37c51f948a3d05c431df0c8bd8dfa6f36a --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000007.CatRelTypesByUUID.atx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:209099cc5bdb713a603488740c831b805af7138397499f5d946a2c8aed69d446 +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/a00000007.gdbindexes b/data/dep/dep_moderate_current.gdb/a00000007.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..d9e21aab9dc1fb2f3598c153db5e7d3008bf5166 --- /dev/null +++ 
b/data/dep/dep_moderate_current.gdb/a00000007.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc98e24d04eddd7f38905d176c1ea43c547a3a2daa0c80091a20d3443c726621 +size 602 diff --git a/data/dep/dep_moderate_current.gdb/a00000007.gdbtable b/data/dep/dep_moderate_current.gdb/a00000007.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..178021a866e94c0c2cede66d9b132220ad1fa071 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000007.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6cc24256e330ab468b5c594309b1493c2028fe072c9f13e5fe41b2084ce9a16 +size 3626 diff --git a/data/dep/dep_moderate_current.gdb/a00000007.gdbtablx b/data/dep/dep_moderate_current.gdb/a00000007.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..9e13ad03f7ea312986f4082415309ae16b0896b1 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000007.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d81aa3c399da5feba68a24ae6461edfca18cfaa9d81a1f1dab9b8b8a64e590c +size 5152 diff --git a/data/dep/dep_moderate_current.gdb/a00000009.gdbindexes b/data/dep/dep_moderate_current.gdb/a00000009.gdbindexes new file mode 100644 index 0000000000000000000000000000000000000000..411a21b962bc28575095cac36b825dc2740869e8 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000009.gdbindexes @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:760bd4864717c60d5e7c0c6461a622062680e6f79f51250f49456aa609b43774 +size 116 diff --git a/data/dep/dep_moderate_current.gdb/a00000009.gdbtable b/data/dep/dep_moderate_current.gdb/a00000009.gdbtable new file mode 100644 index 0000000000000000000000000000000000000000..ddb5501cf563db2cdabb84656899bce15305beb0 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000009.gdbtable @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a1e13400c7e19a4859721d796841025a9ae43de9fe182495d6b1ace3c6f573d +size 4829512 diff --git a/data/dep/dep_moderate_current.gdb/a00000009.gdbtablx b/data/dep/dep_moderate_current.gdb/a00000009.gdbtablx new file mode 100644 index 0000000000000000000000000000000000000000..05bec39e2fe80939e05ec6115319dbd97f2ce3ed --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000009.gdbtablx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d4564b250d965ca86bc1d496e3d6b51e6d3a5a84fe7aaecdca670ce832263b8 +size 5152 diff --git a/data/dep/dep_moderate_current.gdb/a00000009.horizon b/data/dep/dep_moderate_current.gdb/a00000009.horizon new file mode 100644 index 0000000000000000000000000000000000000000..5c53f7e90d4127ee3ce28fc173278972653fa297 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000009.horizon @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73f51c36d09203210a9d16b125c20b9332b23ca525e51515a7984c87392f0f04 +size 32 diff --git a/data/dep/dep_moderate_current.gdb/a00000009.spx b/data/dep/dep_moderate_current.gdb/a00000009.spx new file mode 100644 index 0000000000000000000000000000000000000000..081dadbb2775c2e0ff861b3d4d1d448f90c57162 --- /dev/null +++ b/data/dep/dep_moderate_current.gdb/a00000009.spx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2bb119554f18e176cea6a629efdf4baaa0eb2b7ba78874cac58524c74f86cfa +size 4118 diff --git a/data/dep/dep_moderate_current.gdb/gdb b/data/dep/dep_moderate_current.gdb/gdb new file mode 100644 index 
0000000000000000000000000000000000000000..a786e127004dd9e94e88fda7742d248237ad8885 Binary files /dev/null and b/data/dep/dep_moderate_current.gdb/gdb differ diff --git a/data/dep/dep_moderate_current.gdb/timestamps b/data/dep/dep_moderate_current.gdb/timestamps new file mode 100644 index 0000000000000000000000000000000000000000..1c24019efb1fa8d3f45a6463ccef87af70f9a594 Binary files /dev/null and b/data/dep/dep_moderate_current.gdb/timestamps differ diff --git a/data/hand.tif b/data/hand.tif new file mode 100644 index 0000000000000000000000000000000000000000..173e7206d3b2b90c47494c364ff2e2f6aec6871d --- /dev/null +++ b/data/hand.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7c2b97da1de805e457b7dc8007b7bc2fc8129d179cb3b415c4bca80d47e6616 +size 16763776 diff --git a/data/hospitals.geojson b/data/hospitals.geojson new file mode 100644 index 0000000000000000000000000000000000000000..56357dc704d4b133cdd46894636e781d5e045269 --- /dev/null +++ b/data/hospitals.geojson @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:164004fd98b88200014cb2da8cdb03f76bb456ec5d60820dd4e0da4da2e5f679 +size 30022 diff --git a/data/ida_2021_hwms_ny.geojson b/data/ida_2021_hwms_ny.geojson new file mode 100644 index 0000000000000000000000000000000000000000..bd27bb7bed3292f4b301c42a62a1b8f2e42b7ce6 --- /dev/null +++ b/data/ida_2021_hwms_ny.geojson @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:948b8cc6fcdbfed1d5a6dc3ac59d4a616c012b19a2a4bfa950d2ac1893ce2568 +size 65474 diff --git a/data/mta_entrances.geojson b/data/mta_entrances.geojson new file mode 100644 index 0000000000000000000000000000000000000000..6a03a290b189d06ddedf6e6b93ec9d842101ecfa --- /dev/null +++ b/data/mta_entrances.geojson @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f5cc2ac27763a44bc7c9166e229f221dd5ffb285bf148bc4c12a326b23b8072 +size 939323 diff --git a/data/nyc_dem_30m.tif b/data/nyc_dem_30m.tif new file mode 100644 index 0000000000000000000000000000000000000000..df7c48b669f48a08e6ec4528374eebbd53b624b0 --- /dev/null +++ b/data/nyc_dem_30m.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98295d9d9be7e47302a5a55ee30c69f1e1b2f69c917f6f8a13f8bd16a2fbe45d +size 15608607 diff --git a/data/nyc_ntas_2020.geojson b/data/nyc_ntas_2020.geojson new file mode 100644 index 0000000000000000000000000000000000000000..bc9db34cd1c6dce0fc49dcfecff8c2bd94496ef7 --- /dev/null +++ b/data/nyc_ntas_2020.geojson @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb5f1759872c8fa7ed18f7430c971b3b238a68927e0046d06d1ddb4ce90fc26b +size 4589872 diff --git a/data/nycha.geojson b/data/nycha.geojson new file mode 100644 index 0000000000000000000000000000000000000000..2b9f0f5cf0e48b81106de4cdb932b9e74fcdbb71 --- /dev/null +++ b/data/nycha.geojson @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f3a4da7938f0e70c00b8497aabdc0e5dede8e9bc4940abbf0c35171ba831fa4 +size 308085 diff --git a/data/prithvi_ida_2021.geojson b/data/prithvi_ida_2021.geojson new file mode 100644 index 0000000000000000000000000000000000000000..f0619a4874156c574dfe57a9cef47aaab690cce4 --- /dev/null +++ b/data/prithvi_ida_2021.geojson @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ee784aa4f72a76baa1c09094f4a7bcc757860beb6e031a7656a3a91062a17b6 +size 2046423 diff --git a/data/registers/mta_entrances.json b/data/registers/mta_entrances.json new file mode 100644 index 
0000000000000000000000000000000000000000..63ea1ec60a478f647575f56b5357e3feaa5ad6ec --- /dev/null +++ b/data/registers/mta_entrances.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae10b7a09860adaae9b9ef056c0b1c4bc5a8ea8ee127b83f84a3957cfee2c4c4 +size 1465145 diff --git a/data/registers/nycha.json b/data/registers/nycha.json new file mode 100644 index 0000000000000000000000000000000000000000..6c2cc306af1e101afc5ae3b4f04c75c4cb1c14ec --- /dev/null +++ b/data/registers/nycha.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:429ee990e27b90cb824678eaf7679cd6efc385103c564d1acc97b39766183a0e +size 160508 diff --git a/data/registers/schools.json b/data/registers/schools.json new file mode 100644 index 0000000000000000000000000000000000000000..31dc06f04b5e05ce5e00a633d5d5ba312e9c71ed --- /dev/null +++ b/data/registers/schools.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fb2aa47325d8214453016e0f7813650e4b347b2976edef0590ed9a5af0d6058 +size 448698 diff --git a/data/sandy_inundation.geojson b/data/sandy_inundation.geojson new file mode 100644 index 0000000000000000000000000000000000000000..504465ed62f3173f08d64c31da971790e2facd23 --- /dev/null +++ b/data/sandy_inundation.geojson @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d5b899acc144a422ca340eabe100c91c5fe110e2b13e62216e61fdc07b00200 +size 91392952 diff --git a/data/schools.geojson b/data/schools.geojson new file mode 100644 index 0000000000000000000000000000000000000000..c06fcc9eb914c2eb52c537bc129b8d9f0e4de1b7 --- /dev/null +++ b/data/schools.geojson @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d67aa09b8385d984ed41caf042431c5e86406143e0cc14b52a2fc5d3d308f888 +size 897297 diff --git a/data/twi.tif b/data/twi.tif new file mode 100644 index 0000000000000000000000000000000000000000..11107353dbfbe13a65f0e39d20f976e526d6c683 --- /dev/null +++ b/data/twi.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:feaa1e615827687038ef5a02a141b94c84c82694b516b35955e55099e60a3209 +size 21347040 diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..ca5a73eecadb4fe2d9a64c0f45cc713af7f69dda --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,106 @@ +#!/usr/bin/env sh +# Entrypoint for the personal HF Space (msradam/riprap-nyc) on L4. +# +# Boots three things in order: +# 1. Ollama serve → granite4.1:8b on localhost:11434 +# 2. riprap-models → Prithvi/TerraMind/TTM/GLiNER/Embedding on :7861 +# 3. web.main → FastAPI + SSE on :7860 (HF Spaces public port) +# +# The 8B is baked into the image (see Dockerfile.l4); the EO toolchain +# (terratorch + deps) installs at runtime to keep the build sandbox +# under its disk threshold. ~2 minutes on first cold start; cached +# thereafter for the lifetime of the image. + +set -e + +# --- 1. EO toolchain (runtime-installed; same pattern as the canonical +# entrypoint.sh) ------------------------------------------------- +EO_DIR="$HOME/.eo-pkgs" +EO_MARKER="$EO_DIR/.installed" +if [ ! -f "$EO_MARKER" ]; then + echo "[entrypoint.l4] installing EO toolchain into $EO_DIR ..." + mkdir -p "$EO_DIR" + # torchvision is now baked into the base image (Dockerfile.l4) so + # don't re-install it here — the EO_DIR shadowing copy was the + # source of the `torchvision::nms does not exist` runtime error. 
+    if pip install --no-cache-dir --no-deps --target="$EO_DIR" \
+        terratorch==1.1rc6 einops diffusers timm; then
+        if PYTHONPATH="$EO_DIR:$PYTHONPATH" python -c "
+import terratorch
+import terratorch.models.backbones.terramind.model.terramind_register
+from terratorch.registry import FULL_MODEL_REGISTRY
+n = len([k for k in FULL_MODEL_REGISTRY if 'terramind' in k.lower()])
+assert n > 0
+print(f'[entrypoint.l4] terratorch ok ({n} terramind entries)')
+"; then
+            touch "$EO_MARKER"
+            echo "[entrypoint.l4] EO toolchain READY"
+        else
+            echo "[entrypoint.l4] EO verify FAILED — Prithvi/TerraMind probes will skip"
+        fi
+    else
+        echo "[entrypoint.l4] pip install FAILED — Prithvi/TerraMind probes will skip"
+    fi
+else
+    echo "[entrypoint.l4] EO toolchain already installed (cached)"
+fi
+export PYTHONPATH="$EO_DIR:$PYTHONPATH"
+
+# --- 2. Ollama serve --------------------------------------------------
+LOG_OLLAMA="$HOME/ollama.log"
+ollama serve 2>&1 | tee "$LOG_OLLAMA" &
+# $! is the tee end of the pipeline, not ollama itself. Good enough for
+# liveness: tee exits on EOF as soon as ollama dies.
+OLLAMA_PID=$!
+
+for i in $(seq 1 60); do
+    if curl -sf http://127.0.0.1:11434/ > /dev/null 2>&1; then
+        echo "[entrypoint.l4] ollama up (pid $OLLAMA_PID) after ${i}s"
+        break
+    fi
+    if ! kill -0 "$OLLAMA_PID" 2>/dev/null; then
+        echo "[entrypoint.l4] FATAL: ollama serve died"
+        tail -40 "$LOG_OLLAMA" || true
+        exit 1
+    fi
+    sleep 1
+done
+
+# Granite 4.1:8b is baked. Pre-warm into VRAM so the first reconcile
+# doesn't pay the ~30s model-load tax.
+echo "[entrypoint.l4] pre-warming granite4.1:8b into VRAM ..."
+curl -s -X POST http://127.0.0.1:11434/api/generate \
+    -d '{"model":"granite4.1:8b","prompt":"hi","stream":false,"keep_alive":"24h","options":{"num_predict":1}}' \
+    -o /dev/null --max-time 120 \
+    && echo "[entrypoint.l4] granite warm" \
+    || echo "[entrypoint.l4] WARNING: granite warmup failed (will load lazily)"
+
+# --- 3. riprap-models on :7861 ---------------------------------------
+# Same FastAPI app the AMD droplet runs, just rehosted in-process here
+# so app/inference.py's RIPRAP_ML_BASE_URL points at localhost.
+LOG_MODELS="$HOME/riprap-models.log"
+uvicorn riprap_models:app --host 127.0.0.1 --port 7861 --log-level info \
+    > "$LOG_MODELS" 2>&1 &
+MODELS_PID=$!
+
+for i in $(seq 1 60); do
+    if curl -sf http://127.0.0.1:7861/healthz > /dev/null 2>&1; then
+        echo "[entrypoint.l4] riprap-models up (pid $MODELS_PID) after ${i}s"
+        break
+    fi
+    if ! kill -0 "$MODELS_PID" 2>/dev/null; then
+        echo "[entrypoint.l4] FATAL: riprap-models died"
+        tail -40 "$LOG_MODELS" || true
+        exit 1
+    fi
+    sleep 1
+done
+
+# --- GPU sanity --------------------------------------------------------
+if command -v nvidia-smi > /dev/null 2>&1; then
+    echo "[entrypoint.l4] nvidia-smi:"
+    nvidia-smi -L || true
+else
+    echo "[entrypoint.l4] WARNING: nvidia-smi missing — running on CPU"
+fi
+
+# --- 4. Web app (foreground) -----------------------------------------
+exec uvicorn web.main:app --host 0.0.0.0 --port 7860 --log-level info
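The boot order above is load-bearing: the web app assumes Ollama and riprap-models are already answering on localhost. For smoke-testing a freshly started container from Python instead of curl, a sketch like the following mirrors the same three gates (assumes only `httpx`; the base URL, ports, and 60 s budget are taken from the script and are illustrative):

```python
# smoke_probe.py: illustrative readiness probe mirroring the curl loops above.
import time

import httpx

BASE = "http://127.0.0.1"
CHECKS = [
    (f"{BASE}:11434/", "ollama"),              # Ollama answers 200 on /
    (f"{BASE}:7861/healthz", "riprap-models"),
    (f"{BASE}:7860/", "web"),
]


def wait_ready(url: str, name: str, timeout_s: int = 60) -> bool:
    """Poll once per second, like the entrypoint's 60-iteration loop."""
    for i in range(timeout_s):
        try:
            if httpx.get(url, timeout=2.0).status_code == 200:
                print(f"{name} up after {i}s")
                return True
        except httpx.HTTPError:
            pass
        time.sleep(1)
    print(f"{name} NOT ready after {timeout_s}s")
    return False


if __name__ == "__main__":
    raise SystemExit(0 if all(wait_ready(u, n) for u, n in CHECKS) else 1)
```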
diff --git a/inference-vllm/Dockerfile b/inference-vllm/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..7871bf6f1158556048503515c88a4c4b6b8a5d39
--- /dev/null
+++ b/inference-vllm/Dockerfile
@@ -0,0 +1,100 @@
+# Riprap vLLM Space — primary inference backend (msradam/riprap-vllm).
+#
+# Same shape as the Ollama riprap-inference Space, but with vLLM
+# serving Granite 4.1 8B FP8 natively on the L4. FP8 is hardware-
+# supported on Ada Lovelace (L4 = sm_89), so no Marlin/AWQ kernels
+# needed.
+#
+# Two backends:
+#   - vLLM on :8000 — granite-4.1-8b-fp8, OpenAI-compatible
+#   - riprap-models on :7861 — Prithvi/TerraMind/TTM/GLiNER/Embedding
+# A FastAPI bearer-auth proxy on :7860 routes /v1/* to whichever
+# backend serves it.
+#
+# VRAM budget on a 24 GB L4:
+#   Granite 4.1 8B FP8        ~8 GB
+#   vLLM KV cache (gpu_util)  ~3 GB
+#   EO model stack            ~10 GB (Prithvi + TerraMind + TTM)
+#   Headroom                  ~3 GB
+# We cap vLLM at gpu_memory_utilization=0.55 so it doesn't grab the
+# whole device.
+
+# Ubuntu 24.04 ships Python 3.12; needed because terratorch 1.1rc6 pulls
+# torchgeo>=0.7.0 which itself requires Python 3.11+. NVIDIA's earliest
+# CUDA + ubuntu24.04 + cudnn tag is 12.6.0; CUDA 12.6 is forward-compat
+# with the cu124 PyTorch wheels we install below.
+FROM nvidia/cuda:12.6.3-cudnn-runtime-ubuntu24.04 AS base
+
+ENV DEBIAN_FRONTEND=noninteractive \
+    PIP_BREAK_SYSTEM_PACKAGES=1
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    python3 python3-pip python3-venv python-is-python3 \
+    curl ca-certificates git zstd procps \
+    gcc \
+    gdal-bin libgdal-dev libgeos-dev libproj-dev \
+    libgl1 libglib2.0-0 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Ubuntu 24.04 ships with a default `ubuntu` user at UID 1000; remove
+# it before creating the HF Spaces convention `user` at UID 1000.
+RUN userdel -r ubuntu 2>/dev/null || true && useradd -m -u 1000 user
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:/usr/local/bin:/usr/bin:/bin \
+    PYTHONUNBUFFERED=1 \
+    HF_HOME=/home/user/.cache/huggingface \
+    TRANSFORMERS_CACHE=/home/user/.cache/huggingface
+
+WORKDIR /home/user/app
+
+# vLLM brings its own torch / cu124. Pin a known-working version.
+# Skip `pip install --upgrade pip` — Ubuntu 24.04's debian-managed pip
+# 24.0 lacks the RECORD file so pip can't cleanly uninstall it, and the
+# stock 24.0 is new enough for our needs anyway.
+# Quote the >=-pinned specs: unquoted, sh treats the > as a redirect
+# and the version floor is silently dropped.
+RUN pip install --no-cache-dir \
+    vllm==0.7.3 \
+    "fastapi>=0.115" \
+    "uvicorn[standard]>=0.32" \
+    "httpx>=0.27" \
+    "pydantic>=2.9" \
+    "nvidia-ml-py>=12.560"
+
+# riprap-models specialist deps. Layered after vLLM so its torch wins.
+COPY services/riprap-models/requirements.txt /tmp/req-models.txt
+RUN pip install --no-cache-dir -r /tmp/req-models.txt
+
+RUN pip install --no-cache-dir \
+    peft==0.18.1 \
+    granite-tsfm==0.3.3 \
+    "sentence-transformers>=3.3,<4" \
+    "gliner>=0.2.6" \
+    torchvision
+
+# Bake terratorch with its transitive deps at build time (vs the canonical
+# entrypoint which runtime-installs with --no-deps to dodge the CPU Space's
+# tight build sandbox). On L4 we have build room; full install lets all
+# the EO probes work without dep-chase whack-a-mole.
+RUN pip install --no-cache-dir \
+    terratorch==1.1rc6 \
+    einops diffusers timm \
+    albumentations \
+    segmentation-models-pytorch \
+    kornia \
+    tifffile
+
+# Bake Granite 4.1 8B FP8 weights into the image (~8 GB). vLLM auto-
+# detects the FP8 config in the model's config.json.
+ENV VLLM_MODEL=ibm-granite/granite-4.1-8b-fp8
+RUN python -c "from huggingface_hub import snapshot_download; \
+    snapshot_download(repo_id='$VLLM_MODEL', cache_dir='/home/user/.cache/huggingface')"
+
+# Service code. The deploy script moves these to the repo root.
+COPY services/riprap-models/main.py ./riprap_models.py
+COPY proxy.py ./proxy.py
+COPY entrypoint.sh ./entrypoint.sh
+RUN chmod +x ./entrypoint.sh
+
+RUN chown -R user:user /home/user
+USER user
+
+EXPOSE 7860
+CMD ["./entrypoint.sh"]
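Because vLLM exposes the OpenAI surface natively and the proxy just forwards `/v1/chat/completions`, any stock OpenAI client can exercise this image once it is up. A hedged sketch (assumes the `openai` Python package; `SPACE_URL` and `TOKEN` are placeholders, and the model aliases come from the `--served-model-name` flags in the entrypoint below):

```python
# client_sanity.py: hedged sketch of a stock OpenAI-client call through the
# bearer proxy on :7860. SPACE_URL and TOKEN are placeholders, not real values.
from openai import OpenAI

SPACE_URL = "https://example-riprap-vllm.hf.space"  # hypothetical deployment URL
TOKEN = "your-token-here"                           # the shared bearer

client = OpenAI(base_url=f"{SPACE_URL}/v1", api_key=TOKEN)

# /v1/models is forwarded straight to vLLM, so every alias registered by
# --served-model-name should come back.
for model in client.models.list():
    print(model.id)

# Any of the aliases addresses the same FP8 weights.
resp = client.chat.completions.create(
    model="granite4.1:8b",
    messages=[{"role": "user", "content": "One-line readiness check."}],
    max_tokens=32,
)
print(resp.choices[0].message.content)
```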
diff --git a/inference-vllm/entrypoint.sh b/inference-vllm/entrypoint.sh
new file mode 100755
index 0000000000000000000000000000000000000000..20ecea092181fd3876a469e6f43f9cd6ddfa7fcf
--- /dev/null
+++ b/inference-vllm/entrypoint.sh
@@ -0,0 +1,115 @@
+#!/usr/bin/env sh
+# vLLM Space entrypoint: vLLM + riprap-models + FastAPI bearer-auth proxy.
+
+set -e
+
+# EO toolchain (terratorch + transitive deps) is now baked into the
+# image at build time, not runtime-installed. Verify import once at
+# startup so the trace makes the failure visible if a wheel goes
+# stale.
+python -c "
+import terratorch
+import terratorch.models.backbones.terramind.model.terramind_register
+from terratorch.registry import FULL_MODEL_REGISTRY
+n = len([k for k in FULL_MODEL_REGISTRY if 'terramind' in k.lower()])
+assert n > 0
+print(f'[entrypoint.vllm] terratorch ok ({n} terramind entries)')
+" || echo "[entrypoint.vllm] WARN: terratorch import failed — TerraMind probes will skip"
+
+# --- 1. vLLM (Granite 4.1 8B FP8) on :8000 --------------------------
+LOG_VLLM="$HOME/vllm.log"
+# --gpu-memory-utilization 0.55: vLLM gets ~12.4 GB of 22.5 GB L4.
+# After FP8 weights (~8 GB) that leaves ~4.4 GB for KV cache (4096 ctx).
+# riprap-models gets the remaining ~10 GB. Models are loaded sequentially
+# (vLLM first, then riprap-models) so peak overlap is low.
+python -m vllm.entrypoints.openai.api_server \
+    --model ibm-granite/granite-4.1-8b-fp8 \
+    --served-model-name granite4.1:8b granite-4.1-8b ibm-granite/granite-4.1-8b-fp8 \
+    --host 127.0.0.1 \
+    --port 8000 \
+    --gpu-memory-utilization 0.55 \
+    --max-model-len 4096 \
+    --enforce-eager \
+    --guided-decoding-backend lm-format-enforcer \
+    --disable-log-requests \
+    > "$LOG_VLLM" 2>&1 &
+VLLM_PID=$!
+
+for i in $(seq 1 240); do
+    if curl -sf http://127.0.0.1:8000/health > /dev/null 2>&1; then
+        echo "[entrypoint.vllm] vLLM up (pid $VLLM_PID) after ${i}s"
+        break
+    fi
+    if ! kill -0 "$VLLM_PID" 2>/dev/null; then
+        echo "[entrypoint.vllm] FATAL: vLLM died"
+        echo "=== vllm.log grep (Error|OOM|CUDA|ValueError|killed) ==="
+        grep -iE "error|out of memory|killed|valueerror|cuda|exception|failed" "$LOG_VLLM" | tail -40 || true
+        echo "=== vllm.log tail-30 ==="
+        tail -30 "$LOG_VLLM" || true
+        exit 1
+    fi
+    sleep 1
+done
+
+if ! curl -sf http://127.0.0.1:8000/health > /dev/null 2>&1; then
+    echo "[entrypoint.vllm] FATAL: vLLM did not become ready within 240s"
+    echo "=== vllm.log grep (Error|OOM|CUDA|ValueError|killed) ==="
+    grep -iE "error|out of memory|killed|valueerror|cuda|exception|failed" "$LOG_VLLM" | tail -40 || true
+    echo "=== vllm.log tail-30 ==="
+    tail -30 "$LOG_VLLM" || true
+    exit 1
+fi
+
+# Background watchdog: if vLLM dies after startup, restart it.
+# Keep the flags identical to the initial launch (including the
+# guided-decoding backend) so a restart doesn't silently change behavior.
+_start_vllm() {
+    python -m vllm.entrypoints.openai.api_server \
+        --model ibm-granite/granite-4.1-8b-fp8 \
+        --served-model-name granite4.1:8b granite-4.1-8b ibm-granite/granite-4.1-8b-fp8 \
+        --host 127.0.0.1 \
+        --port 8000 \
+        --gpu-memory-utilization 0.55 \
+        --max-model-len 4096 \
+        --enforce-eager \
+        --guided-decoding-backend lm-format-enforcer \
+        --disable-log-requests \
+        >> "$LOG_VLLM" 2>&1 &
+    echo $!
+}
+_vllm_watchdog() {
+    local pid=$1
+    while true; do
+        sleep 30
+        if ! kill -0 "$pid" 2>/dev/null; then
+            echo "[watchdog] vLLM died — restarting..." >> "$LOG_VLLM"
+            pid=$(_start_vllm)
+            echo "[watchdog] vLLM restarted as pid $pid" >> "$LOG_VLLM"
+        fi
+    done
+}
+_vllm_watchdog "$VLLM_PID" &
+
+# --- 2. riprap-models on :7861 --------------------------------------
+LOG_MODELS="$HOME/riprap-models.log"
+uvicorn riprap_models:app --host 127.0.0.1 --port 7861 --log-level info \
+    > "$LOG_MODELS" 2>&1 &
+MODELS_PID=$!
+
+for i in $(seq 1 60); do
+    if curl -sf http://127.0.0.1:7861/healthz > /dev/null 2>&1; then
+        echo "[entrypoint.vllm] riprap-models up (pid $MODELS_PID) after ${i}s"
+        break
+    fi
+    if ! kill -0 "$MODELS_PID" 2>/dev/null; then
+        echo "[entrypoint.vllm] FATAL: riprap-models died"
+        tail -40 "$LOG_MODELS" || true
+        exit 1
+    fi
+    sleep 1
+done
+
+# Mirror the vLLM hard-fail above: don't proceed if the loop timed out.
+if ! curl -sf http://127.0.0.1:7861/healthz > /dev/null 2>&1; then
+    echo "[entrypoint.vllm] FATAL: riprap-models did not become ready within 60s"
+    tail -40 "$LOG_MODELS" || true
+    exit 1
+fi
+
+# --- 3. GPU sanity --------------------------------------------------
+if command -v nvidia-smi > /dev/null 2>&1; then
+    nvidia-smi -L || true
+fi
+
+# --- 4. FastAPI bearer-auth proxy on :7860 (foreground) -------------
+exec uvicorn proxy:app --host 0.0.0.0 --port 7860 --log-level info
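The watchdog above is plain sh and only detects process exit, not a wedged server. For readers supervising vLLM from Python instead, the same respawn loop looks like this (illustrative only; the command is abridged and the real flag set lives in the script above):

```python
# respawn_sketch.py: illustrative Python equivalent of _vllm_watchdog above.
import subprocess
import time

CMD = ["python", "-m", "vllm.entrypoints.openai.api_server",
       "--port", "8000"]  # abridged; see the full flag set in the entrypoint

proc = subprocess.Popen(CMD)
while True:
    time.sleep(30)                  # same 30 s cadence as the sh loop
    if proc.poll() is not None:     # poll() returns the exit code once dead
        print(f"vLLM exited with {proc.returncode}, restarting")
        proc = subprocess.Popen(CMD)
```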
diff --git a/inference-vllm/proxy.py b/inference-vllm/proxy.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b0e6c4241514fd8fb3eee9f709aa25685fff596
--- /dev/null
+++ b/inference-vllm/proxy.py
@@ -0,0 +1,398 @@
+"""Riprap vLLM Space — bearer-auth proxy on port 7860.
+
+Forwards /v1/chat/completions and /v1/completions to vLLM on
+localhost:8000 (which exposes the OpenAI-compatible surface
+natively), and forwards specialist endpoints to riprap-models on
+localhost:7861.
+
+Same auth shape as the Ollama-backed riprap-inference Space — both
+UI Spaces (lablab + msradam/riprap) carry the shared bearer in
+RIPRAP_LLM_API_KEY.
+
+GPU power
+---------
+A background sampler reads `nvmlDeviceGetPowerUsage` every 100 ms
+into a ring buffer. Each forwarded POST records the wall-clock
+window of the upstream call and reports:
+
+    X-GPU-Power-W      mean draw (W) across the window
+    X-GPU-Energy-J     energy (J) over the window
+    X-GPU-Duration-S   forwarded-call duration in seconds
+
+The `/v1/power` GET also exposes the instantaneous reading for
+clients that prefer to bracket their own work with two samples
+(used by the LLM client path where LiteLLM hides response headers).
+
+Reading from NVML costs <1 ms per sample; the ring buffer holds 60 s
+of 100 ms samples (600 entries). Sampler degrades to a no-op if NVML
+init fails (unlikely on an L4 Space, but possible on CPU-only sims).
+"""
+from __future__ import annotations
+
+import asyncio
+import logging
+import os
+import time
+from collections import deque
+from typing import AsyncIterator
+
+import httpx
+from fastapi import FastAPI, HTTPException, Request
+from fastapi.responses import JSONResponse, Response, StreamingResponse
+
+log = logging.getLogger("riprap.proxy")
+
+VLLM_URL = "http://127.0.0.1:8000"
+MODELS_URL = "http://127.0.0.1:7861"
+
+PROXY_TOKEN = os.environ.get("RIPRAP_PROXY_TOKEN", "")
+
+app = FastAPI(title="Riprap vLLM Proxy")
+
+
+# ---------------------------------------------------------------------------
+# GPU power sampler
+# ---------------------------------------------------------------------------
+
+# Ring buffer of (unix_ts, power_w) samples. 600 entries × 100 ms = 60 s
+# of history, which covers the longest single inference call we'd see
+# (vLLM cold-compile is ~120 s but that surfaces as multiple shorter
+# reads from inside vLLM's loop, not as a single forwarded POST).
+_SAMPLES: deque[tuple[float, float]] = deque(maxlen=600)
+_SAMPLER_TASK: asyncio.Task | None = None
+_NVML_OK: bool = False
+_NVML_HANDLE = None
+_NVML_ERR: str | None = None
+
+
+def _init_nvml() -> None:
+    """Best-effort NVML init. On failure we record the error string and
+    leave _NVML_OK=False — the proxy still serves traffic, just without
+    real power data."""
+    global _NVML_OK, _NVML_HANDLE, _NVML_ERR
+    try:
+        import pynvml
+        pynvml.nvmlInit()
+        # Single-GPU L4 Space — device 0 is the L4. If a future deploy
+        # uses multi-GPU we'd average across handles, but that's not
+        # the current shape.
+        _NVML_HANDLE = pynvml.nvmlDeviceGetHandleByIndex(0)
+        # Probe once to confirm power query works.
+        pynvml.nvmlDeviceGetPowerUsage(_NVML_HANDLE)
+        _NVML_OK = True
+        log.info("NVML initialized for GPU 0")
+    except Exception as e:  # noqa: BLE001
+        _NVML_ERR = f"{type(e).__name__}: {e}"
+        _NVML_OK = False
+        log.warning("NVML init failed (%s); power data will be unavailable",
+                    _NVML_ERR)
+
+
+def _read_power_w() -> float | None:
+    """Instantaneous package power in watts. None if NVML is dead."""
+    if not _NVML_OK:
+        return None
+    try:
+        import pynvml
+        # nvmlDeviceGetPowerUsage returns milliwatts.
+        mw = pynvml.nvmlDeviceGetPowerUsage(_NVML_HANDLE)
+        return mw / 1000.0
+    except Exception:
+        return None
+
+
+async def _power_sampler() -> None:
+    """Background loop, 100 ms cadence. Cheap (~1 ms NVML query)."""
+    while True:
+        p = _read_power_w()
+        if p is not None:
+            _SAMPLES.append((time.time(), p))
+        await asyncio.sleep(0.1)
+
+
+def _avg_power_over(t0: float, t1: float) -> float | None:
+    """Mean of samples in the [t0, t1] window. Falls back to the most
+    recent sample when none landed in the window; returns None only
+    when the buffer is empty."""
+    if not _SAMPLES:
+        return None
+    bucket = [p for ts, p in _SAMPLES if t0 <= ts <= t1]
+    if not bucket:
+        # Window may be too short / sampler hadn't ticked yet — return
+        # the most recent reading we have as the next-best signal.
+ return _SAMPLES[-1][1] if _SAMPLES else None + return sum(bucket) / len(bucket) + + +@app.on_event("startup") +async def _startup() -> None: + _init_nvml() + if _NVML_OK: + global _SAMPLER_TASK + _SAMPLER_TASK = asyncio.create_task(_power_sampler()) + + +@app.on_event("shutdown") +async def _shutdown() -> None: + if _SAMPLER_TASK is not None: + _SAMPLER_TASK.cancel() + if _NVML_OK: + try: + import pynvml + pynvml.nvmlShutdown() + except Exception: + pass + + +# --------------------------------------------------------------------------- +# Auth + routing +# --------------------------------------------------------------------------- + + +def _check_auth(request: Request) -> None: + if not PROXY_TOKEN: + raise HTTPException(503, "RIPRAP_PROXY_TOKEN not set on the inference Space") + auth = request.headers.get("authorization", "") + if not auth.startswith("Bearer "): + raise HTTPException(401, "missing bearer token") + if auth.removeprefix("Bearer ").strip() != PROXY_TOKEN: + raise HTTPException(401, "invalid bearer token") + + +@app.get("/") +def root(): + return {"service": "riprap-vllm", "ok": True, + "nvml": _NVML_OK, + "nvml_err": None if _NVML_OK else _NVML_ERR} + + +@app.get("/vllm-log", include_in_schema=False) +async def vllm_log(request: Request, lines: int = 100) -> Response: + """Last N lines of $HOME/vllm.log — operator diagnostic.""" + _check_auth(request) + import os + log_path = os.path.join(os.environ.get("HOME", "/home/user"), "vllm.log") + try: + with open(log_path) as f: + tail = f.readlines()[-lines:] + return JSONResponse({"ok": True, "path": log_path, + "lines": len(tail), "log": "".join(tail)}) + except FileNotFoundError: + return JSONResponse({"ok": False, "err": "vllm.log not found"}, status_code=404) + except Exception as e: + return JSONResponse({"ok": False, "err": str(e)}, status_code=500) + + +@app.get("/healthz") +async def healthz(): + out = {"proxy": "ok", "nvml": "ok" if _NVML_OK else f"err: {_NVML_ERR}"} + async with httpx.AsyncClient(timeout=5) as client: + try: + r = await client.get(f"{VLLM_URL}/health") + out["vllm"] = "ok" if r.status_code == 200 else f"http_{r.status_code}" + except Exception as e: + out["vllm"] = f"err: {type(e).__name__}" + try: + r = await client.get(f"{MODELS_URL}/healthz") + if r.status_code == 200: + out["riprap_models"] = "ok" + # Bubble up the loaded-model list + last-error map so + # operators can diagnose without hitting /v1/diag. + try: + body = r.json() + out["riprap_models_loaded"] = body.get("models_loaded") + out["riprap_models_last_errors"] = body.get("last_errors") + except Exception: + pass + else: + out["riprap_models"] = f"http_{r.status_code}" + except Exception as e: + out["riprap_models"] = f"err: {type(e).__name__}" + return out + + +@app.get("/v1/diag", include_in_schema=False) +async def diag(request: Request) -> Response: + """Forward to riprap-models /v1/diag (auth-required). 
+    Operator-only diagnostic snapshot — what's loaded, last per-stage
+    error with traceback tail, and CUDA memory state per device."""
+    _check_auth(request)
+    async with httpx.AsyncClient(timeout=10) as client:
+        try:
+            r = await client.get(f"{MODELS_URL}/v1/diag")
+        except Exception as e:
+            return JSONResponse({"ok": False,
+                                 "err": f"upstream: {type(e).__name__}: {e}"},
+                                status_code=503)
+        return Response(content=r.content,
+                        status_code=r.status_code,
+                        media_type=r.headers.get("content-type", "application/json"))
+
+
+@app.get("/v1/power")
+async def power(request: Request) -> Response:
+    """Instantaneous and recent-window GPU power (W).
+
+    Used by the LLM client path: LiteLLM doesn't surface response
+    headers, so the client samples /v1/power before/after its
+    chat.completions call to bracket the energy reading. The recent
+    1-second average smooths over the 100 ms sampler cadence.
+    """
+    _check_auth(request)
+    if not _NVML_OK:
+        return JSONResponse(
+            {"ok": False, "err": _NVML_ERR or "NVML unavailable"},
+            status_code=503,
+        )
+    now = time.time()
+    inst = _read_power_w()
+    avg_1s = _avg_power_over(now - 1.0, now) if _SAMPLES else None
+    avg_5s = _avg_power_over(now - 5.0, now) if _SAMPLES else None
+    return JSONResponse({
+        "ok": True,
+        "ts": now,
+        "power_w": inst,
+        "power_w_avg_1s": avg_1s,
+        "power_w_avg_5s": avg_5s,
+        "samples_held": len(_SAMPLES),
+        "device": "NVIDIA L4",
+    })
+
+
+# ---------------------------------------------------------------------------
+# Forwarding
+# ---------------------------------------------------------------------------
+
+
+async def _stream_passthrough(upstream: httpx.Response,
+                              add_headers: dict[str, str]) -> AsyncIterator[bytes]:
+    # Streaming responses can't carry headers added after the first byte,
+    # so the caller sets add_headers on the StreamingResponse up front;
+    # this generator only relays the raw bytes.
+    async for chunk in upstream.aiter_raw():
+        yield chunk
+
+
+async def _proxy_post(upstream_base: str, path: str, request: Request,
+                      *, timeout: float = 300.0) -> Response:
+    body = await request.body()
+    headers = {
+        "content-type": request.headers.get("content-type", "application/json"),
+        "accept": request.headers.get("accept", "*/*"),
+    }
+    is_stream = b'"stream":true' in body or b'"stream": true' in body
+    client = httpx.AsyncClient(timeout=timeout)
+    upstream_req = client.build_request(
+        "POST", f"{upstream_base}{path}", content=body, headers=headers
+    )
+
+    t0 = time.time()
+    upstream = await client.send(upstream_req, stream=is_stream)
+
+    if is_stream:
+        # We can't measure end-of-stream wallclock without consuming the
+        # body, but the client sees per-call duration on its side. For
+        # streaming we record headers describing only the start power
+        # snapshot — useful as a sanity signal, not a true energy.
+        async def _close_stream() -> None:
+            # Close both the upstream response and the client we opened;
+            # closing only the response would leak one connection pool
+            # per streamed call.
+            await upstream.aclose()
+            await client.aclose()
+
+        snap = _read_power_w()
+        hdrs = {
+            "x-gpu-power-w": f"{snap:.2f}" if snap is not None else "",
+            "x-gpu-stream": "1",
+        }
+        return StreamingResponse(
+            _stream_passthrough(upstream, hdrs),
+            status_code=upstream.status_code,
+            media_type=upstream.headers.get("content-type", "text/event-stream"),
+            headers=hdrs,
+            background=_close_stream,
+        )
+
+    content = await upstream.aread()
+    await upstream.aclose()
+    await client.aclose()
+    t1 = time.time()
+    duration_s = max(t1 - t0, 0.0)
+
+    extra_headers: dict[str, str] = {}
+    if _NVML_OK:
+        avg_w = _avg_power_over(t0, t1)
+        if avg_w is not None:
+            energy_j = avg_w * duration_s
+            extra_headers = {
+                "x-gpu-power-w": f"{avg_w:.3f}",
+                "x-gpu-energy-j": f"{energy_j:.3f}",
+                "x-gpu-duration-s": f"{duration_s:.3f}",
+                "x-gpu-device": "NVIDIA L4",
+            }
+
+    media_type = upstream.headers.get("content-type", "application/json")
+    response_headers = dict(extra_headers)
+    return Response(
+        content=content,
+        status_code=upstream.status_code,
+        media_type=media_type,
+        headers=response_headers,
+    )
+
+
+# vLLM (OpenAI-compat) routes
+@app.post("/v1/chat/completions")
+async def chat_completions(request: Request) -> Response:
+    _check_auth(request)
+    return await _proxy_post(VLLM_URL, "/v1/chat/completions", request)
+
+
+@app.post("/v1/completions")
+async def completions(request: Request) -> Response:
+    _check_auth(request)
+    return await _proxy_post(VLLM_URL, "/v1/completions", request)
+
+
+@app.post("/v1/embeddings")
+async def embeddings(request: Request) -> Response:
+    """Routed to riprap-models's granite-embed (vLLM doesn't serve our
+    embedding model)."""
+    _check_auth(request)
+    return await _proxy_post(MODELS_URL, "/v1/granite-embed", request)
+
+
+@app.get("/v1/models")
+async def models(request: Request) -> Response:
+    _check_auth(request)
+    async with httpx.AsyncClient(timeout=10) as client:
+        r = await client.get(f"{VLLM_URL}/v1/models")
+    return Response(content=r.content, status_code=r.status_code,
+                    media_type=r.headers.get("content-type", "application/json"))
+
+
+# riprap-models specialist routes
+@app.post("/v1/prithvi-pluvial")
+async def prithvi_pluvial(request: Request) -> Response:
+    _check_auth(request)
+    return await _proxy_post(MODELS_URL, "/v1/prithvi-pluvial", request)
+
+
+@app.post("/v1/terramind")
+async def terramind(request: Request) -> Response:
+    _check_auth(request)
+    return await _proxy_post(MODELS_URL, "/v1/terramind", request)
+
+
+@app.post("/v1/ttm-forecast")
+async def ttm_forecast(request: Request) -> Response:
+    _check_auth(request)
+    return await _proxy_post(MODELS_URL, "/v1/ttm-forecast", request)
+
+
+@app.post("/v1/gliner-extract")
+async def gliner_extract(request: Request) -> Response:
+    _check_auth(request)
+    return await _proxy_post(MODELS_URL, "/v1/gliner-extract", request)
+
+
+@app.api_route("/v1/{path:path}", methods=["POST"])
+async def catch_all(path: str, request: Request) -> Response:
+    _check_auth(request)
+    return await _proxy_post(MODELS_URL, f"/v1/{path}", request)
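Taken together, the `X-GPU-*` headers and `/v1/power` give a client two ways to account for energy. A hedged usage sketch against a deployed Space (assumes `httpx`; `SPACE_URL` and the bearer value are placeholders, and `/v1/power` returns 503 when NVML is unavailable):

```python
# energy_probe.py: hedged sketch of both energy-measurement paths the proxy
# exposes. SPACE_URL and the token are placeholders, not real deployment values.
import time

import httpx

SPACE_URL = "https://example-riprap-vllm.hf.space"   # hypothetical
HEADERS = {"Authorization": "Bearer your-token-here"}

payload = {
    "model": "granite4.1:8b",
    "messages": [{"role": "user", "content": "hello"}],
    "max_tokens": 32,
}

with httpx.Client(base_url=SPACE_URL, headers=HEADERS, timeout=300) as c:
    # Path 1: non-streaming POSTs carry the measured window in headers.
    r = c.post("/v1/chat/completions", json=payload)
    print("mean W :", r.headers.get("x-gpu-power-w"))
    print("joules :", r.headers.get("x-gpu-energy-j"))
    print("seconds:", r.headers.get("x-gpu-duration-s"))

    # Path 2: bracket the call with /v1/power when headers are hidden
    # (e.g. behind LiteLLM). Coarser: two instantaneous samples.
    p0 = c.get("/v1/power").json()
    t0 = time.monotonic()
    r = c.post("/v1/chat/completions", json=payload)
    dt = time.monotonic() - t0
    p1 = c.get("/v1/power").json()
    approx_w = ((p0["power_w"] or 0.0) + (p1["power_w"] or 0.0)) / 2
    print(f"bracketed: ~{approx_w:.1f} W over {dt:.1f} s = ~{approx_w * dt:.0f} J")
```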
riprap-models (Prithvi + TerraMind + TTM + GLiNER + Embedding) on :7861
+#   3. FastAPI bearer-auth proxy on :7860 (HF Spaces public port) that
+#      forwards /v1/chat/completions, /v1/embeddings → Ollama
+#      and /v1/{prithvi,terramind,ttm,gliner,embed} → riprap-models
+#
+# Both UI Spaces (lablab-ai-amd-developer-hackathon/riprap-nyc and
+# msradam/riprap) point their RIPRAP_LLM_BASE_URL + RIPRAP_ML_BASE_URL
+# at this Space's public URL with a shared bearer token.
+#
+# Why Ollama Q4 instead of vLLM BF16: an L4 has 24 GB VRAM. BF16
+# Granite 4.1 8B (~16 GB) plus the EO model stack (~10 GB) plus vLLM
+# KV cache exceeds that. Q4 quantized Granite (~5 GB) fits with the
+# whole EO chain co-resident. The OpenAI /v1 surface means clients
+# can't tell.
+
+FROM nvidia/cuda:12.4.1-cudnn-runtime-ubuntu22.04 AS base
+
+ENV DEBIAN_FRONTEND=noninteractive
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    python3 python3-pip python3-venv python-is-python3 \
+    curl ca-certificates git zstd procps \
+    gdal-bin libgdal-dev libgeos-dev libproj-dev \
+    libgl1 libglib2.0-0 \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN useradd -m -u 1000 user
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:/usr/local/bin:/usr/bin:/bin \
+    PYTHONUNBUFFERED=1 \
+    HF_HOME=/home/user/.cache/huggingface \
+    TRANSFORMERS_CACHE=/home/user/.cache/huggingface \
+    OLLAMA_HOST=127.0.0.1:11434 \
+    OLLAMA_NUM_PARALLEL=1 \
+    OLLAMA_KEEP_ALIVE=24h \
+    OLLAMA_MAX_LOADED_MODELS=2 \
+    OLLAMA_FLASH_ATTENTION=1 \
+    OLLAMA_KV_CACHE_TYPE=q8_0 \
+    OLLAMA_MODELS=/home/user/.ollama/models
+
+RUN curl -fsSL https://ollama.com/install.sh | sh
+
+WORKDIR /home/user/app
+
+# Proxy app deps (FastAPI + httpx) and riprap-models specialist deps.
+# Version specifiers are quoted so the shell doesn't parse `>=` as a
+# redirection and silently install unpinned packages.
+RUN pip install --no-cache-dir --upgrade pip && \
+    pip install --no-cache-dir \
+    "fastapi>=0.115" "uvicorn[standard]>=0.32" "httpx>=0.27" "pydantic>=2.9"
+
+COPY services/riprap-models/requirements.txt /tmp/req-models.txt
+RUN pip install --no-cache-dir -r /tmp/req-models.txt
+
+# Heavier ML stack the riprap-models service needs at runtime. peft is
+# baked at build time (see Dockerfile.l4 for the backstory). torchvision
+# comes from the cu124 wheel index so it matches the base image's CUDA.
+RUN pip install --no-cache-dir \
+    --index-url https://download.pytorch.org/whl/cu124 \
+    torchvision \
+    && pip install --no-cache-dir \
+    peft==0.18.1 \
+    granite-tsfm==0.3.3 \
+    "sentence-transformers>=3.3,<4" \
+    "gliner>=0.2.6" \
+    --index-url https://pypi.org/simple
+
+# Bake Granite 4.1 weights at build. We pull two tags:
+#   :3b — fast routing / planner
+#   :8b — Capstone synthesis with Mellea rejection sampling
+RUN mkdir -p $OLLAMA_MODELS && \
+    ollama serve & \
+    OPID=$! && \
+    for i in $(seq 1 30); do curl -sf http://127.0.0.1:11434/ > /dev/null && break; sleep 1; done && \
+    ollama pull granite4.1:8b && \
+    ollama pull granite4.1:3b && \
+    kill $OPID 2>/dev/null || true && \
+    sleep 2
+
+# Service code. The deploy script (scripts/deploy_inference_space.sh)
+# moves inference/{proxy.py,entrypoint.sh,Dockerfile} to the repo root
+# before pushing, so the COPY paths below are root-relative.
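+#
+# A hedged local smoke test after building (image tag hypothetical; needs
+# an NVIDIA GPU plus the container toolkit on the host):
+#   docker build -t riprap-inference-l4 .
+#   docker run --rm --gpus all -p 7860:7860 -e RIPRAP_PROXY_TOKEN=dev riprap-inference-l4
+#   curl -s localhost:7860/healthz   # expect proxy/ollama/riprap_models all "ok"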
+COPY services/riprap-models/main.py ./riprap_models.py +COPY proxy.py ./proxy.py +COPY entrypoint.sh ./entrypoint.sh +RUN chmod +x ./entrypoint.sh + +RUN chown -R user:user /home/user +USER user + +EXPOSE 7860 +CMD ["./entrypoint.sh"] diff --git a/inference/entrypoint.sh b/inference/entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..24d2fed3cb20aeb533eb9eb5475078fa4486afdb --- /dev/null +++ b/inference/entrypoint.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env sh +# Inference Space entrypoint: Ollama + riprap-models + FastAPI proxy. + +set -e + +# --- 0. EO toolchain (terratorch + Sentinel-2 chain). Runtime-installed +# because the build sandbox is too tight to fit it next to +# Granite weights. --------------------------------------------- +EO_DIR="$HOME/.eo-pkgs" +EO_MARKER="$EO_DIR/.installed" +if [ ! -f "$EO_MARKER" ]; then + echo "[entrypoint.inf] installing EO toolchain into $EO_DIR ..." + mkdir -p "$EO_DIR" + if pip install --no-cache-dir --no-deps --target="$EO_DIR" \ + terratorch==1.1rc6 einops diffusers timm; then + if PYTHONPATH="$EO_DIR:$PYTHONPATH" python -c " +import terratorch +import terratorch.models.backbones.terramind.model.terramind_register +from terratorch.registry import FULL_MODEL_REGISTRY +n = len([k for k in FULL_MODEL_REGISTRY if 'terramind' in k.lower()]) +assert n > 0 +print(f'[entrypoint.inf] terratorch ok ({n} terramind entries)') +"; then + touch "$EO_MARKER" + echo "[entrypoint.inf] EO toolchain READY" + else + echo "[entrypoint.inf] EO verify FAILED — TerraMind probes will skip" + fi + else + echo "[entrypoint.inf] pip install FAILED — TerraMind probes will skip" + fi +else + echo "[entrypoint.inf] EO toolchain cached" +fi +export PYTHONPATH="$EO_DIR:$PYTHONPATH" + +# --- 1. Ollama (Granite 4.1 baked into the image, just serve them) --- +LOG_OLLAMA="$HOME/ollama.log" +ollama serve 2>&1 | tee "$LOG_OLLAMA" & +OLLAMA_PID=$! + +for i in $(seq 1 60); do + if curl -sf http://127.0.0.1:11434/ > /dev/null 2>&1; then + echo "[entrypoint.inf] ollama up after ${i}s" + break + fi + if ! kill -0 "$OLLAMA_PID" 2>/dev/null; then + echo "[entrypoint.inf] FATAL: ollama died" + tail -40 "$LOG_OLLAMA" || true + exit 1 + fi + sleep 1 +done + +# Pre-warm 8B into VRAM (24h keep-alive). 3B will lazy-load on first +# planner call. +echo "[entrypoint.inf] pre-warming granite4.1:8b ..." +curl -s -X POST http://127.0.0.1:11434/api/generate \ + -d '{"model":"granite4.1:8b","prompt":"hi","stream":false,"keep_alive":"24h","options":{"num_predict":1}}' \ + -o /dev/null --max-time 120 \ + && echo "[entrypoint.inf] 8b warm" \ + || echo "[entrypoint.inf] WARN: 8b warmup failed (will load lazily)" + +# --- 2. riprap-models on :7861 --------------------------------------- +LOG_MODELS="$HOME/riprap-models.log" +uvicorn riprap_models:app --host 127.0.0.1 --port 7861 --log-level info \ + > "$LOG_MODELS" 2>&1 & +MODELS_PID=$! + +for i in $(seq 1 60); do + if curl -sf http://127.0.0.1:7861/healthz > /dev/null 2>&1; then + echo "[entrypoint.inf] riprap-models up after ${i}s" + break + fi + if ! kill -0 "$MODELS_PID" 2>/dev/null; then + echo "[entrypoint.inf] FATAL: riprap-models died" + tail -40 "$LOG_MODELS" || true + exit 1 + fi + sleep 1 +done + +# --- 3. GPU sanity --------------------------------------------------- +if command -v nvidia-smi > /dev/null 2>&1; then + nvidia-smi -L || true +fi + +# --- 4. 
FastAPI bearer-auth proxy on :7860 (foreground) -------------
+exec uvicorn proxy:app --host 0.0.0.0 --port 7860 --log-level info
diff --git a/inference/proxy.py b/inference/proxy.py
new file mode 100644
index 0000000000000000000000000000000000000000..235be5c2f6f35571b7735217a8e5a0a700b61d34
--- /dev/null
+++ b/inference/proxy.py
@@ -0,0 +1,160 @@
+"""Riprap Inference Space — bearer-auth proxy on port 7860.
+
+Forwards /v1/chat/completions and /v1/embeddings to Ollama on
+localhost:11434 (which exposes an OpenAI-compatible surface), and
+forwards everything else to riprap-models on localhost:7861.
+
+A single shared secret (env var RIPRAP_PROXY_TOKEN) gates all
+inbound calls; clients pass it as `Authorization: Bearer <token>`.
+The two UI Spaces (lablab + personal mirror) carry the same token
+in their RIPRAP_LLM_API_KEY env var.
+
+Streaming endpoints (SSE for chat completions) are relayed chunk by
+chunk; non-streaming endpoints are buffered.
+"""
+from __future__ import annotations
+
+import os
+from typing import AsyncIterator
+
+import httpx
+from fastapi import FastAPI, HTTPException, Request
+from fastapi.responses import JSONResponse, Response, StreamingResponse
+
+OLLAMA_URL = "http://127.0.0.1:11434"
+MODELS_URL = "http://127.0.0.1:7861"
+
+PROXY_TOKEN = os.environ.get("RIPRAP_PROXY_TOKEN", "")
+
+app = FastAPI(title="Riprap Inference Proxy")
+
+
+def _check_auth(request: Request) -> None:
+    if not PROXY_TOKEN:
+        raise HTTPException(503, "RIPRAP_PROXY_TOKEN not set on the inference Space")
+    auth = request.headers.get("authorization", "")
+    if not auth.startswith("Bearer "):
+        raise HTTPException(401, "missing bearer token")
+    if auth.removeprefix("Bearer ").strip() != PROXY_TOKEN:
+        raise HTTPException(401, "invalid bearer token")
+
+
+@app.get("/")
+def root():
+    # HF Spaces hits / for health on idle-wakeup. Don't require auth.
+    return {"service": "riprap-inference", "ok": True}
+
+
+@app.get("/healthz")
+async def healthz():
+    out = {"proxy": "ok"}
+    async with httpx.AsyncClient(timeout=5) as client:
+        try:
+            r = await client.get(f"{OLLAMA_URL}/api/tags")
+            out["ollama"] = "ok" if r.status_code == 200 else f"http_{r.status_code}"
+        except Exception as e:
+            out["ollama"] = f"err: {type(e).__name__}"
+        try:
+            r = await client.get(f"{MODELS_URL}/healthz")
+            out["riprap_models"] = "ok" if r.status_code == 200 else f"http_{r.status_code}"
+        except Exception as e:
+            out["riprap_models"] = f"err: {type(e).__name__}"
+    return out
+
+
+# ── Ollama (OpenAI-compat) routes ─────────────────────────────────────
+async def _stream_passthrough(upstream: httpx.Response) -> AsyncIterator[bytes]:
+    async for chunk in upstream.aiter_raw():
+        yield chunk
+
+
+async def _proxy_post(upstream_base: str, path: str, request: Request,
+                      *, timeout: float = 300.0) -> Response:
+    body = await request.body()
+    headers = {
+        "content-type": request.headers.get("content-type", "application/json"),
+        "accept": request.headers.get("accept", "*/*"),
+    }
+    is_stream = b'"stream":true' in body or b'"stream": true' in body
+    client = httpx.AsyncClient(timeout=timeout)
+    upstream_req = client.build_request(
+        "POST", f"{upstream_base}{path}", content=body, headers=headers
+    )
+    upstream = await client.send(upstream_req, stream=is_stream)
+
+    if is_stream:
+        async def _close() -> None:
+            # Close the upstream response AND the per-request client when the
+            # client disconnects or the stream ends; closing only the response
+            # would leak the client's connection pool.
+            await upstream.aclose()
+            await client.aclose()
+        return StreamingResponse(
+            _stream_passthrough(upstream),
+            status_code=upstream.status_code,
+            media_type=upstream.headers.get("content-type", "text/event-stream"),
+            background=_close,
+        )
+    content = await upstream.aread()
+    await upstream.aclose()
+    await client.aclose()
+    return Response(
+        content=content,
+        status_code=upstream.status_code,
+        media_type=upstream.headers.get("content-type", "application/json"),
+    )
+
+
+@app.post("/v1/chat/completions")
+async def chat_completions(request: Request) -> Response:
+    _check_auth(request)
+    return await _proxy_post(OLLAMA_URL, "/v1/chat/completions", request)
+
+
+@app.post("/v1/completions")
+async def completions(request: Request) -> Response:
+    _check_auth(request)
+    return await _proxy_post(OLLAMA_URL, "/v1/completions", request)
+
+
+@app.post("/v1/embeddings")
+async def embeddings(request: Request) -> Response:
+    """OpenAI-style embeddings. 
Routed to riprap-models's granite-embed + endpoint, which returns the same {data: [{embedding: [...]}]} shape.""" + _check_auth(request) + return await _proxy_post(MODELS_URL, "/v1/granite-embed", request) + + +@app.get("/v1/models") +async def models(request: Request) -> Response: + _check_auth(request) + async with httpx.AsyncClient(timeout=10) as client: + r = await client.get(f"{OLLAMA_URL}/v1/models") + return Response(content=r.content, status_code=r.status_code, + media_type=r.headers.get("content-type", "application/json")) + + +# ── riprap-models (specialist ML) routes ────────────────────────────── +@app.post("/v1/prithvi-pluvial") +async def prithvi_pluvial(request: Request) -> Response: + _check_auth(request) + return await _proxy_post(MODELS_URL, "/v1/prithvi-pluvial", request) + + +@app.post("/v1/terramind") +async def terramind(request: Request) -> Response: + _check_auth(request) + return await _proxy_post(MODELS_URL, "/v1/terramind", request) + + +@app.post("/v1/ttm-forecast") +async def ttm_forecast(request: Request) -> Response: + _check_auth(request) + return await _proxy_post(MODELS_URL, "/v1/ttm-forecast", request) + + +@app.post("/v1/gliner-extract") +async def gliner_extract(request: Request) -> Response: + _check_auth(request) + return await _proxy_post(MODELS_URL, "/v1/gliner-extract", request) + + +# Catch-all for any riprap-models endpoints not explicitly listed above. +@app.api_route("/v1/{path:path}", methods=["POST"]) +async def catch_all(path: str, request: Request) -> Response: + _check_auth(request) + return await _proxy_post(MODELS_URL, f"/v1/{path}", request) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..c42aaed20ccd8058e40503609f701e9efb058409 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,26 @@ +[tool.ruff] +line-length = 100 +target-version = "py310" +extend-exclude = [".venv", "data", "corpus", "outputs"] + +[tool.ruff.lint] +# Pyflakes (real bug class) + import + bugbear; skip purely stylistic rules +# (E701/E702 multi-statement-per-line) where the compact form is intentional +# in our FSM error-recording one-liners. +select = ["F", "E4", "E7", "E9", "I", "B", "UP"] +ignore = [ + "E701", # one-line `if cond: stmt` — intentional in compact guards + "E702", # `a; b` — intentional in compact `rec["ok"] = False; rec["err"] = ...` pairs + "E741", # ambiguous var name (e.g. `l`) — accept context where it's clear + "B008", # function call in default arg — fine for httpx Timeout etc. +] + +[tool.ruff.lint.per-file-ignores] +# Scripts use one-shot patterns we don't want to over-lint. +"scripts/*.py" = ["F841", "B007"] + +[tool.vulture] +min_confidence = 80 +ignore_decorators = ["@app.get", "@app.post", "@action"] +ignore_names = ["request"] # FastAPI handler signatures +paths = ["app", "web", "scripts", "riprap.py", "agent.py"] diff --git a/requirements-experiments.txt b/requirements-experiments.txt new file mode 100644 index 0000000000000000000000000000000000000000..5516a4ba8c488a3254d844ff75acc2460e00d860 --- /dev/null +++ b/requirements-experiments.txt @@ -0,0 +1,31 @@ +# Experiment-only dependencies. NOT installed on HF Spaces. +# Production requirements.txt stays minimal; this file picks up the +# heavy ML / EO toolchain that experiments need. 
+# +# Install: uv pip install -r requirements-experiments.txt + +# STAC + remote sensing +pystac-client>=0.7 +planetary-computer>=1.0 +rioxarray>=0.15 +xarray>=2024.1 + +# Phase 1: Prithvi-EO 2.0 (Sen1Floods11 fine-tune) +# terratorch is the IBM/NASA loading framework for Prithvi-EO 2.0. +# Pinned loosely so experiments can pick up bug fixes; if integration +# happens, pin tighter in production requirements. +terratorch>=1.0 +einops>=0.8 + +# Phase 2: GLiNER structured extraction +gliner>=0.2.13 + +# Phase 3: Granite Embedding Reranker R2 (cross-encoder via +# sentence-transformers, sidecar pattern — vLLM --task score is out of +# scope per project decision) +sentence-transformers>=3.3 + +# General experiment tooling +pyarrow>=18.0 +matplotlib>=3.8 +pillow>=10.0 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..950b22d3bde2fa886ad89b1128d3f84747bc498e --- /dev/null +++ b/requirements.txt @@ -0,0 +1,116 @@ +# Riprap runtime dependencies (deployment). +# Offline-only deps (py3dep, terratorch, whitebox-workflows) are NOT here +# — they only run for the one-time fixture pre-compute in scripts/. + +# Web + streaming +fastapi>=0.115 +uvicorn>=0.32 +httpx>=0.27 +pydantic>=2.9 + +# Geo +geopandas>=1.0 +shapely>=2.0 +pyproj>=3.6 +fiona>=1.10 +rasterio>=1.4 + +# Data +pandas>=2.2 +pyarrow>=18.0 +numpy>=1.26 + +# pyarrow.PyExtensionType was removed in pyarrow 17 and older `datasets` +# (transitive dep of sentence-transformers / gliner / terratorch) crashes +# on import against pyarrow 18+. datasets >= 3.0 uses pa.ExtensionType +# instead. Without this pin, HF Spaces' resolver picks an older datasets +# and FastAPI startup dies before the first request. +datasets>=3.0 + +# RAG: Granite Embedding 278M (CPU torch is sufficient on HF Spaces). +# sentence-transformers 3.x's model_card.py does +# `from transformers.integrations import CodeCarbonCallback` at import +# time. transformers's lazy-import surface raises if `codecarbon` isn't +# present; on HF Space (Python 3.10) the lazy-import resolution fails +# even on import-only paths. Two options: +# (1) pin sentence-transformers to a version that didn't have model_card +# (2) install codecarbon so the lazy-import resolves +# We tried (1) at <3.4 and <4 — both failed because 3.3.x also imports +# CodeCarbonCallback. Going with (2): codecarbon is a small ~7MB pure- +# Python package; we don't enable its tracking, just satisfy the import. +sentence-transformers>=3.3,<4 +codecarbon>=2.5,<4 +pypdf>=5.0 + +# Tight coexistence pins: granite-tsfm 0.3.x calls transformers.utils +# .download_url which was removed in transformers 5.x; mellea 0.3.x is +# happy with the older hf_hub. Keep BOTH older to avoid the conflict +# (transformers >=4.55,<5 + huggingface_hub >=0.34,<1). +transformers>=4.55,<5 +huggingface_hub>=0.34,<1 + +# Granite 4.1 reconciliation via Ollama (local fallback) +ollama>=0.4 + +# LiteLLM Router: unifies vLLM (AMD GPU, OpenAI-compatible) and Ollama +# behind one chat() call surface, with automatic primary->fallback +# routing when RIPRAP_LLM_PRIMARY=vllm is unreachable. See app/llm.py. +litellm>=1.52 + +# GLiNER specialist (Phase 2): typed entity extraction over RAG output. +# Apache-2.0 model is `urchade/gliner_medium-v2.1` — NOT the gliner_base +# variant which is CC-BY-NC-4.0. See app/context/gliner_extract.py. +gliner>=0.2.13 + +# Phase 1 (Prithvi live) + Phase 4 (TerraMind) + earth-observation deps. 
+# +# These deps live in `requirements-experiments.txt` (local + AMD), NOT +# in production. Two attempts at bringing them into the HF image (the +# floor at 1.0.x then the pin at 1.1rc6) both failed pip resolution +# against our Py3.10 constraints (transformers<5, hf_hub<1, +# granite-tsfm<0.3.4, mellea<0.4). `terratorch>=1.2` pins numpy>=2.2 +# which breaks the rest of the stack; `1.1rc6` and earlier had +# transitive cone conflicts the resolver couldn't satisfy in the +# 30-second pip budget. +# +# On HF Spaces the lazy-import path returns clean `skipped: deps +# unavailable on this deployment` for terramind_synthesis (which has +# no remote-inference path); the other EO specialists (prithvi_live, +# terramind_lulc, terramind_buildings) work via app/inference.py +# routing to the AMD MI300X droplet, provided we have the chip-fetch +# deps below — they're small (pure-Python or thin wrappers around +# numpy/rasterio which we already have) and don't pull terratorch or +# torchvision binaries. +# - planetary-computer / pystac-client: STAC search at Microsoft PC +# - rioxarray / xarray: COG band reads +# - einops: tensor reshape used by prithvi_live._build_chip +planetary-computer>=1.0 +pystac-client>=0.7 +rioxarray>=0.15 +xarray>=2024.1 +einops>=0.7 + +# Burr FSM +burr>=0.40 + +# Granite TimeSeries TTM r2 — short-horizon residual nowcast (Ekambaram et al. +# 2024, NeurIPS). The PyPI package name is granite-tsfm; importable as +# tsfm_public. Brings in transformers + accelerate; torch is already in the +# CUDA base image. +# Pinned to 0.3.3 because >=0.3.4 dropped Python 3.10 support and the +# CUDA-runtime base image ships Ubuntu 22.04 / Python 3.10. The +# tsfm_public.toolkit.get_model API is stable across this minor range. +granite-tsfm>=0.3.0,<0.3.4 + +# IBM Research's Mellea — instruct/validate/repair framework. Powers the +# default reconciler: Granite output + programmatic post-conditions +# + rejection sampling. +# Pinned to <0.4 because 0.4+ requires Python>=3.11 and the +# nvidia/cuda:12.4.1-cudnn-runtime-ubuntu22.04 base image ships Python 3.10. +# 0.3.2 has the same instruct/req/RejectionSamplingStrategy API surface +# we use; if it doesn't, the validator falls through to the standard +# reconciler (graceful degradation). +mellea>=0.3.0,<0.4 + +# Misc +tqdm>=4.66 diff --git a/riprap.py b/riprap.py new file mode 100644 index 0000000000000000000000000000000000000000..40781a91fc7b0ec50477a01bad4c616b0de2df74 --- /dev/null +++ b/riprap.py @@ -0,0 +1,92 @@ +"""Riprap — CLI driver for the bulk-mode flood exposure register. + +Joins an asset class (schools / NYCHA / MTA entrances) against the +static flood layers (Sandy + DEP Stormwater scenarios), runs the +scoring rubric over the result, and emits a ranked CSV plus a tier +distribution to stderr. 
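+
+Example (arguments as defined in main() below):
+
+    python riprap.py --asset-class schools --top 10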
+""" +from __future__ import annotations + +import argparse +import sys +import warnings +from pathlib import Path + +warnings.filterwarnings("ignore") + +import pandas as pd # noqa: E402 + +from app.assets import schools # noqa: E402 +from app.flood_layers import dep_stormwater, sandy_inundation # noqa: E402 +from app.score import WEIGHTS, score_frame # noqa: E402 + +OUT = Path(__file__).resolve().parent / "outputs" +OUT.mkdir(exist_ok=True) + + +def build_schools_register() -> pd.DataFrame: + print("loading schools...", file=sys.stderr) + s = schools.load() + print(f" {len(s)} schools loaded", file=sys.stderr) + + print("joining Sandy Inundation Zone...", file=sys.stderr) + s["sandy"] = sandy_inundation.join(s).astype(int) + print(f" {int(s['sandy'].sum())} schools inside Sandy zone", file=sys.stderr) + + for scen in ["dep_extreme_2080", "dep_moderate_2050", "dep_moderate_current"]: + print(f"joining {scen}...", file=sys.stderr) + j = dep_stormwater.join(s, scen) + s[scen] = (j["depth_class"] > 0).astype(int) + s[f"{scen}_depth_class"] = j["depth_class"].values + s[f"{scen}_depth_label"] = j["depth_label"].values + print(f" {int(s[scen].sum())} schools inside {scen}", file=sys.stderr) + + s = score_frame(s) + + # drop geometry for CSV; keep lat/lon for journalist usability + s["lat"] = s.geometry.to_crs("EPSG:4326").y + s["lon"] = s.geometry.to_crs("EPSG:4326").x + cols = ["loc_code", "name", "address", "borough", "bbl", "bin", + "geo_district", "lat", "lon", + "sandy", + "dep_extreme_2080", "dep_extreme_2080_depth_label", + "dep_moderate_2050", "dep_moderate_2050_depth_label", + "dep_moderate_current", "dep_moderate_current_depth_label", + "score", "tier"] + return pd.DataFrame(s[cols]) + + +def main() -> int: + ap = argparse.ArgumentParser(description="Riprap flood exposure register") + ap.add_argument("--asset-class", default="schools") + ap.add_argument("--out", default=None) + ap.add_argument("--top", type=int, default=20, help="rows to print to stdout") + args = ap.parse_args() + + if args.asset_class != "schools": + print(f"asset class '{args.asset_class}' not yet implemented", file=sys.stderr) + return 2 + + df = build_schools_register() + df = df.sort_values(["score", "name"], ascending=[False, True]) + + out_path = Path(args.out) if args.out else OUT / "schools_register.csv" + df.to_csv(out_path, index=False) + print(f"\nwrote {len(df)} rows -> {out_path}", file=sys.stderr) + + print(f"\n=== top {args.top} ===") + print(df.head(args.top).to_string(index=False)) + + print("\n=== tier distribution ===") + print(df["tier"].value_counts().sort_index().to_string()) + + print("\n=== signal totals ===") + for k in WEIGHTS: + if k in df.columns: + print(f" {k:24s}: {int(df[k].sum()):4d} schools") + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/audit.py b/scripts/audit.py new file mode 100644 index 0000000000000000000000000000000000000000..17976c5d005109e452c652718d06be5294721297 --- /dev/null +++ b/scripts/audit.py @@ -0,0 +1,158 @@ +"""Hallucination audit harness. + +Runs the FSM against a curated address sweep, logs every paragraph, +counts dropped sentences, flags any sentence with an event name not in +its source documents. + +Run after the schools register has finished building (otherwise it +contends with the batch for Ollama). 
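+
+An "event leak" example: if a briefing paragraph mentions "katrina" but
+none of the documents sent to Granite contain it, that sentence came from
+the model's priors rather than the evidence; that is the failure mode
+this harness counts via find_event_leaks() below.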
+ + python scripts/audit.py +""" +from __future__ import annotations + +import json +import sys +import time +import warnings +from pathlib import Path + +warnings.filterwarnings("ignore") + +ROOT = Path(__file__).resolve().parent.parent +sys.path.insert(0, str(ROOT)) + +from app.fsm import run # noqa: E402 + +OUT = ROOT / "outputs" / "audit_log.jsonl" +OUT.parent.mkdir(exist_ok=True, parents=True) + +# A curated cross-borough sweep covering the full range of conditions +ADDRESSES = [ + # Far Rockaway / Sandy zone (everything fires) + "180 Beach 35 St, Queens", + "Beach 105 Street and Rockaway Boulevard, Queens", + + # Hollis / Jamaica (Ida basement deaths) + "153-09 90 Avenue, Jamaica, Queens", + "Hollis Avenue and 200th Street, Queens", + + # Brooklyn coastal — Coney Island / NYCHA + "2950 W 25 Street, Brooklyn", + "Surf Avenue and West 25 Street, Brooklyn", + "Sheepshead Bay Road, Brooklyn", + + # Carroll Gardens / Gowanus (chronic flooding) + "Smith and 9 Street, Brooklyn", + "Carroll Street and 3 Avenue, Brooklyn", + + # Lower Manhattan / Sandy zone + "280 Broome Street, Manhattan", + "South Street Seaport, Manhattan", + "Battery Park, Manhattan", + + # Midtown / dry control + "350 5 Avenue, Manhattan", # Empire State + "1 Times Square, Manhattan", + "Lincoln Center, Manhattan", + + # Bronx + "Pelham Bay Park, Bronx", + "Hunts Point, Bronx", + "Yankee Stadium, Bronx", + + # Staten Island + "Tottenville, Staten Island", + "Great Kills, Staten Island", + "St. George Ferry Terminal, Staten Island", + + # Queens dry / inland + "Forest Hills, Queens", + "JFK Airport, Queens", + "Astoria Park, Queens", + + # Edge cases + "Brooklyn Bridge Park, Brooklyn", + "Roosevelt Island, Manhattan", +] + +EVENT_NAMES = ["sandy", "ida", "ophelia", "henri", "irene", "isaias", + "harvey", "katrina", "florence"] + + +def find_event_leaks(paragraph: str, doc_corpus: str) -> list[str]: + leaks = [] + p = paragraph.lower() + for ev in EVENT_NAMES: + if ev in p and ev not in doc_corpus.lower(): + leaks.append(ev) + return leaks + + +def main() -> int: # TODO(cleanup): cc-grade-D (24) + if OUT.exists(): + OUT.unlink() + print(f"running audit on {len(ADDRESSES)} addresses; logging to {OUT}", + file=sys.stderr) + + summary = { + "total": 0, "ok": 0, "dropped_total": 0, + "with_drops": 0, "event_leaks": 0, + } + t0 = time.time() + for q in ADDRESSES: + try: + r = run(q) + except Exception as e: + print(f" ! 
{q[:50]:<50} ERR: {type(e).__name__}: {e}", file=sys.stderr)
+            continue
+        para = r.get("paragraph") or ""
+        audit = r.get("audit") or {}
+        dropped = audit.get("dropped", []) or []
+
+        # rebuild a haystack from documents we sent to Granite
+        from app.reconcile import build_documents
+        # NOTE: build_documents needs the same snap shape the FSM stored
+        snap = {k: r.get(k) for k in ("geocode","sandy","dep","floodnet",
+                                      "nyc311","microtopo","ida_hwm","rag")}
+        doc_msgs = build_documents(snap)
+        haystack = "\n".join(m.get("content", "") for m in doc_msgs)
+
+        leaks = find_event_leaks(para, haystack)
+
+        rec = {
+            "query": q,
+            "address": (r.get("geocode") or {}).get("address"),
+            "borough": (r.get("geocode") or {}).get("borough"),
+            "paragraph": para,
+            "raw": audit.get("raw"),
+            "dropped": dropped,
+            "event_leaks": leaks,
+            "sandy": r.get("sandy"),
+            "n_floodnet_events_3y": (r.get("floodnet") or {}).get("n_flood_events_3y", 0),
+            "n_311": (r.get("nyc311") or {}).get("n", 0),
+            "microtopo_pct_200m": (r.get("microtopo") or {}).get("rel_elev_pct_200m"),
+        }
+        with OUT.open("a") as f:
+            f.write(json.dumps(rec, default=str) + "\n")
+
+        summary["total"] += 1
+        summary["dropped_total"] += len(dropped)
+        if dropped: summary["with_drops"] += 1
+        if leaks: summary["event_leaks"] += 1
+        if not leaks and not dropped: summary["ok"] += 1
+
+        # clean runs get "✓", anything with drops or leaks gets "⚠"
+        # (the old third "·" branch was unreachable)
+        marker = "✓" if not (leaks or dropped) else "⚠"
+        print(f"  {marker} {q[:50]:<50} dropped={len(dropped)} leaks={leaks or '-'}",
+              file=sys.stderr)
+
+    elapsed = time.time() - t0
+    print(f"\n=== SUMMARY (in {elapsed:.0f}s) ===", file=sys.stderr)
+    for k, v in summary.items():
+        print(f"  {k:18s} {v}", file=sys.stderr)
+    print(f"\nfull log: {OUT}", file=sys.stderr)
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/scripts/bake_cornerstone_rasters.py b/scripts/bake_cornerstone_rasters.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a0bb6b4d4ba4fef4c0759b46649d59c11ef1d80
--- /dev/null
+++ b/scripts/bake_cornerstone_rasters.py
@@ -0,0 +1,108 @@
+"""Bake DEP scenarios + Sandy extent to compact GeoTIFFs in data/baked/.
+
+The Cornerstone is a Hazard Reader — it reads what NYC's ground already
+remembers (modeled DEP scenarios, empirical 2012 Sandy extent). All of
+those layers are static, so we bake them once into uint8 GeoTIFFs in
+EPSG:2263 (NYC State Plane, feet) and look up per-asset depth class
+via rasterio.sample() instead of running gpd.sjoin per query.
+
+Per-query latency drops from ~10 ms (warm) / ~33 s (cold-load) on the
+HF Space CPU to ~3 ms with a 73 ms one-time cold-load. Baked footprint
+is ~7 MB total versus ~46 MB GDBs + 87 MB Sandy GeoJSON.
+
+See experiments/22_cornerstone_optim/RESULTS.md for the bench.
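+
+A minimal sketch of the query-time lookup this enables (illustrative
+only; the real per-asset reader lives in the app, not in this script):
+
+    import rasterio
+    # x_ft, y_ft: asset coordinates already projected to EPSG:2263
+    with rasterio.open("data/baked/dep_moderate_current.tif") as src:
+        depth_class = int(next(src.sample([(x_ft, y_ft)]))[0])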
+ +Run: + uv run python scripts/bake_cornerstone_rasters.py +""" +from __future__ import annotations + +import sys +import time +from pathlib import Path + +import numpy as np +import rasterio +from rasterio import features +from rasterio.transform import from_origin + +REPO = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(REPO)) + +from app.flood_layers import dep_stormwater, sandy_inundation # noqa: E402 + +NYC_CRS = "EPSG:2263" +RES_FT = 10.0 +OUT_DIR = REPO / "data" / "baked" + + +def nyc_grid(res_ft: float = RES_FT): + minx, miny = 910_000.0, 110_000.0 + maxx, maxy = 1_080_000.0, 280_000.0 + width = int(np.ceil((maxx - minx) / res_ft)) + height = int(np.ceil((maxy - miny) / res_ft)) + return from_origin(minx, maxy, res_ft, res_ft), width, height + + +def burn(gdf, value_col_or_const, out_path, transform, width, height): + if isinstance(value_col_or_const, str): + shapes = ((geom, int(val)) for geom, val + in zip(gdf.geometry, gdf[value_col_or_const])) + else: + v = int(value_col_or_const) + shapes = ((geom, v) for geom in gdf.geometry) + arr = features.rasterize( + shapes=shapes, out_shape=(height, width), transform=transform, + fill=0, dtype="uint8", merge_alg=rasterio.enums.MergeAlg.replace, + ) + out_path.parent.mkdir(parents=True, exist_ok=True) + profile = { + "driver": "GTiff", "dtype": "uint8", "count": 1, + "width": width, "height": height, "transform": transform, + "crs": NYC_CRS, "compress": "deflate", "predictor": 2, + "tiled": True, "blockxsize": 512, "blockysize": 512, "nodata": 0, + } + with rasterio.open(out_path, "w", **profile) as dst: + dst.write(arr, 1) + return arr + + +def bake_dep(scenario, transform, width, height): + print(f" baking {scenario}...", end=" ", flush=True) + t0 = time.perf_counter() + g = dep_stormwater.load(scenario).copy() + g["Flooding_Category"] = g["Flooding_Category"].astype(int) + # rasterize lowest first so highest category wins at overlaps + g = g.sort_values("Flooding_Category", ascending=True) + out = OUT_DIR / f"{scenario}.tif" + arr = burn(g, "Flooding_Category", out, transform, width, height) + dt = time.perf_counter() - t0 + print(f"{dt:5.1f}s {out.stat().st_size/1e6:5.1f} MB " + f"nonzero={int((arr>0).sum()):,}") + + +def bake_sandy(transform, width, height): + print(" baking sandy...", end=" ", flush=True) + t0 = time.perf_counter() + g = sandy_inundation.load().copy() + out = OUT_DIR / "sandy.tif" + arr = burn(g, 1, out, transform, width, height) + dt = time.perf_counter() - t0 + print(f"{dt:5.1f}s {out.stat().st_size/1e6:5.1f} MB " + f"nonzero={int((arr>0).sum()):,}") + + +def main(): + transform, width, height = nyc_grid(RES_FT) + print(f"Grid: {width}x{height} px @ {RES_FT} ft/px") + print(f"Output: {OUT_DIR}\n") + bake_dep("dep_extreme_2080", transform, width, height) + bake_dep("dep_moderate_2050", transform, width, height) + bake_dep("dep_moderate_current", transform, width, height) + bake_sandy(transform, width, height) + total = sum(p.stat().st_size for p in OUT_DIR.glob("*.tif")) / 1e6 + print(f"\nTotal: {total:.1f} MB") + + +if __name__ == "__main__": + main() diff --git a/scripts/build_mta_entrances_register.py b/scripts/build_mta_entrances_register.py new file mode 100644 index 0000000000000000000000000000000000000000..89890cad3f933ddd47185ace78138aa3e7a41b44 --- /dev/null +++ b/scripts/build_mta_entrances_register.py @@ -0,0 +1,23 @@ +"""Pre-compute the MTA Subway Entrances flood-exposure register. 
+ +Run: python scripts/build_mta_entrances_register.py + +Resume-safe: re-running picks up after a network blip. +""" +from __future__ import annotations + +import sys +import warnings +from pathlib import Path + +warnings.filterwarnings("ignore") + +ROOT = Path(__file__).resolve().parent.parent +sys.path.insert(0, str(ROOT)) + +from app.assets import mta_entrances # noqa: E402 +from app.register_builder import build_register # noqa: E402 + +if __name__ == "__main__": + build_register("mta_entrances", mta_entrances.load, + meta_keys=("name", "address", "borough", "entrance_type")) diff --git a/scripts/build_nycha_register.py b/scripts/build_nycha_register.py new file mode 100644 index 0000000000000000000000000000000000000000..b24ad099cd67b40cc7a7bb2e2fa1498613c6802f --- /dev/null +++ b/scripts/build_nycha_register.py @@ -0,0 +1,20 @@ +"""Pre-compute the NYCHA developments flood-exposure register. +Run: python scripts/build_nycha_register.py +""" +from __future__ import annotations + +import sys +import warnings +from pathlib import Path + +warnings.filterwarnings("ignore") + +ROOT = Path(__file__).resolve().parent.parent +sys.path.insert(0, str(ROOT)) + +from app.assets import nycha # noqa: E402 +from app.register_builder import build_register # noqa: E402 + +if __name__ == "__main__": + build_register("nycha", nycha.load, + meta_keys=("name", "address", "borough", "tds_num")) diff --git a/scripts/build_schools_register.py b/scripts/build_schools_register.py new file mode 100644 index 0000000000000000000000000000000000000000..353f4d3cf53cd0ab4c721dea8a2282f5b90262f9 --- /dev/null +++ b/scripts/build_schools_register.py @@ -0,0 +1,22 @@ +"""Pre-compute the NYC public schools flood-exposure register. +Run: python scripts/build_schools_register.py + +Resume-safe: re-running picks up after a network blip. +""" +from __future__ import annotations + +import sys +import warnings +from pathlib import Path + +warnings.filterwarnings("ignore") + +ROOT = Path(__file__).resolve().parent.parent +sys.path.insert(0, str(ROOT)) + +from app.assets import schools # noqa: E402 +from app.register_builder import build_register # noqa: E402 + +if __name__ == "__main__": + build_register("schools", schools.load, + meta_keys=("name", "address", "borough", "bbl", "bin")) diff --git a/scripts/compute_hydrology_indices.py b/scripts/compute_hydrology_indices.py new file mode 100644 index 0000000000000000000000000000000000000000..76eb83322c5efcd5d03fe697b7d65d687434db15 --- /dev/null +++ b/scripts/compute_hydrology_indices.py @@ -0,0 +1,92 @@ +"""Pre-compute TWI (Topographic Wetness Index) and HAND (Height Above +Nearest Drainage) for the cached NYC DEM. + +These are standard hydrology indices used by InfoWorks ICM, HEC-RAS, +and the Forest Service / USGS. They give the microtopo specialist new +per-address signal beyond elevation percentile + relief: + +- **TWI** = ln(specific_catchment_area / tan(slope)). HIGH values mean + a cell is saturation-prone (large upslope drainage area + low slope = + water accumulates here). +- **HAND** = vertical distance from each cell to the nearest channel. + LOW values (sub-meter) mean the address sits at or near drainage + level — flood-vulnerable. HIGH values mean it's perched on dry ground. + +Output: data/twi.tif and data/hand.tif, aligned with data/nyc_dem_30m.tif. 
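+
+Worked example (illustrative numbers): a cell with specific catchment
+area 3000 m²/m on a 1° slope has TWI = ln(3000 / tan 1°) ≈ 12.1,
+strongly saturation-prone; the same catchment on a 15° slope drops to
+TWI ≈ 9.3.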
+ +Run: python scripts/compute_hydrology_indices.py +""" +from __future__ import annotations + +import sys +import warnings +from pathlib import Path + +warnings.filterwarnings("ignore") + +ROOT = Path(__file__).resolve().parent.parent +DEM_PATH = ROOT / "data" / "nyc_dem_30m.tif" +TWI_OUT = ROOT / "data" / "twi.tif" +HAND_OUT = ROOT / "data" / "hand.tif" + + +def main() -> int: + if not DEM_PATH.exists(): + print(f"missing {DEM_PATH}; run scripts/fetch_nyc_dem.py first", + file=sys.stderr) + return 1 + if TWI_OUT.exists() and HAND_OUT.exists(): + print(f"already exist: {TWI_OUT.name}, {HAND_OUT.name}", file=sys.stderr) + return 0 + + import whitebox_workflows as wbw + wbe = wbw.WbEnvironment() + wbe.verbose = True + wbe.working_directory = str(ROOT / "data") + + print("loading DEM...", file=sys.stderr) + dem = wbe.read_raster(str(DEM_PATH)) + + # 1. Hydrologic conditioning — fill depressions so flow routes terminate + # at the boundary, not inside spurious sinks. Wang & Liu fill is fast. + print("filling depressions (Wang & Liu)...", file=sys.stderr) + dem_filled = wbe.fill_depressions_wang_and_liu(dem) + + # 2. D-infinity flow accumulation -> specific catchment area for TWI + print("D-infinity flow accumulation...", file=sys.stderr) + sca = wbe.dinf_flow_accum(dem_filled, out_type="specific contributing area", + log_transform=False) + + # 3. Slope (degrees) for TWI + print("slope...", file=sys.stderr) + slope = wbe.slope(dem_filled, units="degrees") + + # 4. TWI = ln(SCA / tan(slope)) + print("TWI...", file=sys.stderr) + twi = wbe.wetness_index(sca, slope) + wbe.write_raster(twi, str(TWI_OUT.name), compress=True) + + # 5. Streams: D8 flow accumulation + threshold to a stream raster + print("D8 flow accumulation for stream extraction...", file=sys.stderr) + d8_accum = wbe.d8_flow_accum(dem_filled, out_type="cells", + log_transform=False) + + # Threshold the flow accumulation to identify channels — pick a value that + # gives a reasonable drainage network density. For 30m DEM over NYC, + # >1500 cells (~1.35 km²) is a reasonable channel-initiation threshold. + print("extracting streams...", file=sys.stderr) + streams = wbe.extract_streams(d8_accum, threshold=1500.0) + + # 6. HAND = vertical distance to nearest stream (along flow paths) + print("HAND (elevation_above_stream)...", file=sys.stderr) + hand = wbe.elevation_above_stream(dem_filled, streams) + wbe.write_raster(hand, str(HAND_OUT.name), compress=True) + + print(f"\nwrote:\n {TWI_OUT} ({TWI_OUT.stat().st_size // 1024} KB)\n" + f" {HAND_OUT} ({HAND_OUT.stat().st_size // 1024} KB)", + file=sys.stderr) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/deploy_droplet.sh b/scripts/deploy_droplet.sh new file mode 100755 index 0000000000000000000000000000000000000000..91fcc0cb19d4436fbb5d202de461316ec216cbda --- /dev/null +++ b/scripts/deploy_droplet.sh @@ -0,0 +1,200 @@ +#!/usr/bin/env bash +# Riprap GPU-droplet bring-up — vLLM + riprap-models, idempotent. +# +# Designed for a fresh AMD MI300X droplet (DigitalOcean GPU droplet, +# AMD Developer Cloud node, etc.) with nothing more than: +# - Ubuntu 22.04 / 24.04 +# - Docker + AMD ROCm GPU drivers (kfd / dri device files) +# - SSH root access +# +# The script SSHes to the droplet, ensures the right images are +# pulled, builds the riprap-models container from this repo, starts +# both services, and runs healthchecks. Re-running on the same +# droplet is idempotent: existing containers are removed and +# recreated cleanly. 
+#
+# Usage:
+#   scripts/deploy_droplet.sh <droplet-ip> <token>
+#
+# Example:
+#   scripts/deploy_droplet.sh 129.212.181.238 "$(cat /tmp/riprap/vllm_token.txt)"
+#
+# Env knobs (optional, all have sensible defaults):
+#   SSH_USER        default "root"
+#   SSH_KEY         path to ssh key; default uses ssh-agent
+#   VLLM_IMAGE      default "vllm/vllm-openai-rocm:v0.17.1"
+#   VLLM_PORT       default 8001 (host) → 8000 (container)
+#   MODELS_PORT     default 7860 (host) → 7860 (container)
+#   MODEL_REPO      default "ibm-granite/granite-4.1-8b"
+#   HF_CACHE_HOST   default "/root/hf-cache" on droplet
+#   SKIP_BUILD      "1" to skip building riprap-models image
+#                   (assume it's already present on droplet)
+#
+# Exits non-zero on any step that fails — including the final
+# healthcheck — so this is safe to wrap in CI.
+set -euo pipefail
+
+if [ "$#" -lt 2 ]; then
+  echo "Usage: $0 <droplet-ip> <token>" >&2
+  exit 64
+fi
+
+DROPLET_IP="$1"
+TOKEN="$2"
+
+SSH_USER="${SSH_USER:-root}"
+SSH_KEY_FLAG=""
+if [ -n "${SSH_KEY:-}" ]; then
+  SSH_KEY_FLAG="-i $SSH_KEY"
+fi
+SSH="ssh $SSH_KEY_FLAG -o StrictHostKeyChecking=accept-new -o ConnectTimeout=10 ${SSH_USER}@${DROPLET_IP}"
+SCP="scp $SSH_KEY_FLAG -o StrictHostKeyChecking=accept-new"
+
+VLLM_IMAGE="${VLLM_IMAGE:-vllm/vllm-openai-rocm:v0.17.1}"
+VLLM_PORT="${VLLM_PORT:-8001}"
+MODELS_PORT="${MODELS_PORT:-7860}"
+MODEL_REPO="${MODEL_REPO:-ibm-granite/granite-4.1-8b}"
+HF_CACHE_HOST="${HF_CACHE_HOST:-/root/hf-cache}"
+SKIP_BUILD="${SKIP_BUILD:-0}"
+
+REPO_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
+
+echo "==> Riprap droplet bring-up"
+echo "    droplet ip:  $DROPLET_IP"
+echo "    vllm port:   $VLLM_PORT"
+echo "    models port: $MODELS_PORT"
+echo "    model repo:  $MODEL_REPO"
+echo "    repo root:   $REPO_ROOT"
+echo
+
+# ---- 1. Verify SSH + droplet readiness ----------------------------------
+echo "==> 1. SSH connectivity + GPU device check"
+$SSH bash -s <<'REMOTE'
+set -e
+if ! command -v docker > /dev/null; then
+  echo "[droplet] docker not installed; aborting" >&2
+  exit 1
+fi
+if [ ! -e /dev/kfd ] || [ ! -e /dev/dri ]; then
+  echo "[droplet] no AMD GPU device files (/dev/kfd or /dev/dri); aborting" >&2
+  exit 1
+fi
+echo "[droplet] docker + AMD GPU device files present"
+docker --version
+REMOTE
+
+# ---- 2. Pull vLLM image ---------------------------------------------------
+echo
+echo "==> 2. Pull vLLM image (if not cached)"
+$SSH "docker image inspect $VLLM_IMAGE > /dev/null 2>&1 || docker pull $VLLM_IMAGE"
+
+# ---- 3. Sync riprap-models source to droplet -----------------------------
+echo
+echo "==> 3. Sync riprap-models source"
+$SSH "mkdir -p /workspace/riprap-models /workspace/riprap-build"
+# Sync Dockerfile + sources via tar over SSH (rsync may be missing on
+# a minimal droplet; tar is part of any Linux base).
+tar -C "$REPO_ROOT" -cf - services/riprap-models | \
+  $SSH "tar -C /workspace/riprap-build -xf -"
+
+# ---- 4. Build riprap-models image ----------------------------------------
+if [ "$SKIP_BUILD" = "1" ]; then
+  echo
+  echo "==> 4. Skipping image build (SKIP_BUILD=1)"
+else
+  echo
+  echo "==> 4. Build riprap-models image"
+  echo "    (this takes ~10-20 min on first build; subsequent builds"
+  echo "     reuse layer cache and are < 1 min)"
+  $SSH "cd /workspace/riprap-build && \
+    docker build \
+      -t riprap-models:latest \
+      -f services/riprap-models/Dockerfile \
+      ."
+fi
+
+# ---- 5. Start vLLM container ---------------------------------------------
+echo
+echo "==> 5. Start vLLM container"
+$SSH bash -s <<REMOTE
+docker rm -f vllm > /dev/null 2>&1 || true
+mkdir -p ${HF_CACHE_HOST}
+docker run -d --name vllm \\
+  --device=/dev/kfd --device=/dev/dri --group-add=video \\
+  --ipc=host --shm-size=16g \\
+  -p ${VLLM_PORT}:8000 \\
+  -v ${HF_CACHE_HOST}:/root/.cache/huggingface \\
+  -e GLOO_SOCKET_IFNAME=eth0 -e VLLM_HOST_IP=127.0.0.1 \\
+  --restart unless-stopped \\
+  ${VLLM_IMAGE} \\
+  --model ${MODEL_REPO} \\
+  --host 0.0.0.0 --port 8000 --api-key "${TOKEN}" \\
+  --max-model-len 8192 --served-model-name granite-4.1-8b
+echo "[droplet] vllm container started"
+REMOTE
+
+# ---- 6. Start riprap-models container ------------------------------------
+echo
+echo "==> 6. Start riprap-models container"
+$SSH bash -s <<REMOTE
+docker rm -f riprap-models > /dev/null 2>&1 || true
+docker run -d --name riprap-models \\
+  --device=/dev/kfd --device=/dev/dri --group-add=video \\
+  --ipc=host --shm-size=8g \\
+  -p ${MODELS_PORT}:7860 \\
+  -v ${HF_CACHE_HOST}:/root/.cache/huggingface \\
+  -e RIPRAP_MODELS_API_KEY="${TOKEN}" \\
+  --restart unless-stopped \\
+  riprap-models:latest
+echo "[droplet] riprap-models container started"
+REMOTE
+
+# ---- 7. Healthchecks -----------------------------------------------------
+echo
+echo "==> 7. Healthchecks"
+echo "    waiting up to 90s for vLLM to expose /v1/models..."
+DEADLINE=$((SECONDS + 90))
+while (( SECONDS < DEADLINE )); do
+  if curl -sf --max-time 5 "http://${DROPLET_IP}:${VLLM_PORT}/v1/models" \
+       -H "Authorization: Bearer ${TOKEN}" > /tmp/vllm-models.json 2>/dev/null; then
+    echo "    vLLM ready: $(head -c 200 /tmp/vllm-models.json)..."
+    break
+  fi
+  sleep 3
+done
+if (( SECONDS >= DEADLINE )); then
+  echo "    vLLM did not become ready in 90s; tailing container logs:" >&2
+  $SSH "docker logs --tail 30 vllm" >&2
+  exit 1
+fi
+
+echo "    waiting up to 60s for riprap-models /healthz..."
+DEADLINE=$((SECONDS + 60))
+while (( SECONDS < DEADLINE )); do
+  if curl -sf --max-time 5 "http://${DROPLET_IP}:${MODELS_PORT}/healthz" \
+       > /tmp/models-health.json 2>/dev/null; then
+    echo "    riprap-models ready: $(cat /tmp/models-health.json)"
+    break
+  fi
+  sleep 2
+done
+if (( SECONDS >= DEADLINE )); then
+  echo "    riprap-models did not become ready in 60s; tailing container logs:" >&2
+  $SSH "docker logs --tail 30 riprap-models" >&2
+  exit 1
+fi
+
+echo
+echo "==> DONE"
+echo "    vLLM           http://${DROPLET_IP}:${VLLM_PORT}/v1/models"
+echo "    riprap-models  http://${DROPLET_IP}:${MODELS_PORT}/healthz"
+echo
+echo "Set these in your local env or HF Space variables:"
+echo "  RIPRAP_LLM_PRIMARY=vllm"
+echo "  RIPRAP_LLM_BASE_URL=http://${DROPLET_IP}:${VLLM_PORT}/v1"
+echo "  RIPRAP_LLM_API_KEY=${TOKEN}"
+echo "  RIPRAP_ML_BACKEND=remote"
+echo "  RIPRAP_ML_BASE_URL=http://${DROPLET_IP}:${MODELS_PORT}"
+echo "  RIPRAP_ML_API_KEY=${TOKEN}"
diff --git a/scripts/deploy_inference_space.sh b/scripts/deploy_inference_space.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2640ac9ae78f10c0b5e431b4cabb3292de1dfd05
--- /dev/null
+++ b/scripts/deploy_inference_space.sh
@@ -0,0 +1,124 @@
+#!/usr/bin/env bash
+# Deploy the Riprap inference Space (msradam/riprap-inference) — the
+# headless GPU API both UI Spaces (lablab + personal) call into.
+#
+# Same orphan-branch pattern as deploy_personal_space.sh: the full
+# git history of the source repo would trip HF Spaces' binary-file
+# gate, so we push a single fresh commit containing only what the
+# inference Space needs to run.
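+#
+# Hedged post-deploy smoke test (the hf.space hostname below follows the
+# usual owner-name convention and is an assumption; adjust if it differs):
+#   curl -s https://msradam-riprap-inference.hf.space/healthz
+#   curl -s -H "Authorization: Bearer $RIPRAP_PROXY_TOKEN" \
+#        https://msradam-riprap-inference.hf.space/v1/models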
+ +set -euo pipefail + +REMOTE="inference" +URL="https://huggingface.co/spaces/msradam/riprap-inference" +BRANCH="hf-inference" +LABLAB_NAME_PATTERN="AMD-hackathon|lablab-ai" + +guard_against_lablab () { + if echo "$URL" | grep -qE "$LABLAB_NAME_PATTERN"; then + echo "FATAL: URL ($URL) matches the lablab org pattern." + exit 1 + fi + final=$(curl -sIL -o /dev/null -w "%{url_effective}" "$URL") + if echo "$final" | grep -qE "$LABLAB_NAME_PATTERN"; then + echo "FATAL: URL ($URL) redirects to a lablab-org URL ($final)" + exit 1 + fi +} + +if [ "${1:-}" = "--setup" ]; then + guard_against_lablab + if ! git remote | grep -q "^${REMOTE}$"; then + echo "[deploy.inf] adding remote '$REMOTE' → $URL" + git remote add "$REMOTE" "$URL" + fi + echo "[deploy.inf] set the following secrets in the inference Space" + echo " (Settings → Variables and secrets):" + echo " RIPRAP_PROXY_TOKEN " + echo " HF_TOKEN " + echo "[deploy.inf] then set the same RIPRAP_PROXY_TOKEN as RIPRAP_LLM_API_KEY" + echo " on both the lablab-org Space and msradam/riprap." + exit 0 +fi + +guard_against_lablab + +DEPLOY_TMP="$(git rev-parse --show-toplevel)/.deploy-tmp-inf" +rm -rf "$DEPLOY_TMP" +git worktree add --detach "$DEPLOY_TMP" HEAD + +( + cd "$DEPLOY_TMP" + git checkout --orphan "$BRANCH" + + # Strip everything except what the inference container needs. + rm -rf slides/ submission/ docs/ pitch/ research/ corpus/ \ + assets/ \ + tests/ experiments/ \ + data/ \ + web/ app/ scripts/ \ + Dockerfile Dockerfile.app Dockerfile.l4 \ + docker-compose.yml entrypoint.sh entrypoint.l4.sh \ + pyproject.toml uv.lock \ + agent.py riprap.py helios_nyc.py \ + ARCHITECTURE.md METHODOLOGY.md RESEARCH.md \ + LICENSE NOTICE README.md requirements*.txt + + # Inference Dockerfile + entrypoint + proxy go to the repo root + # (HF Spaces convention — Dockerfile + entrypoint.sh + proxy.py at + # top level). + mv inference/Dockerfile ./Dockerfile + mv inference/entrypoint.sh ./entrypoint.sh + mv inference/proxy.py ./proxy.py + rmdir inference 2>/dev/null || true + chmod +x entrypoint.sh + + # The Dockerfile COPYs services/riprap-models/{main.py,requirements.txt} + # so keep that path. Trim everything else under services/. + find services -mindepth 1 -maxdepth 1 -not -name riprap-models -exec rm -rf {} + + find services/riprap-models -mindepth 1 \ + -not -name main.py -not -name requirements.txt -exec rm -rf {} + + + cat > README.md <<'README' +--- +title: Riprap Inference (Headless GPU API) +emoji: 🌊 +colorFrom: indigo +colorTo: blue +sdk: docker +pinned: false +short_description: Headless GPU API for Riprap. Bearer-auth proxy on L4. +--- + +# Riprap Inference Space + +Headless GPU API for [Riprap](https://github.com/msradam/riprap-nyc). +Runs Granite 4.1 (Ollama, OpenAI-compatible) and the riprap-models +specialist service (Prithvi-EO 2.0 NYC-Pluvial, TerraMind LULC + +Buildings + Synthesis, Granite TTM r2, Granite Embedding, GLiNER) +behind a single FastAPI bearer-auth proxy on port 7860. + +Two UI Spaces consume this: + +- `lablab-ai-amd-developer-hackathon/riprap-nyc` — official AMD + hackathon submission (CPU UI). +- `msradam/riprap` — personal mirror (CPU UI). + +Both pass `Authorization: Bearer ` and call +`/v1/chat/completions`, `/v1/embeddings`, `/v1/prithvi-pluvial`, +`/v1/terramind`, `/v1/ttm-forecast`, `/v1/gliner-extract`. + +Apache 2.0. Source: https://github.com/msradam/riprap-nyc. 
README
+
+  git add -A
+  git -c user.email=msrahmanadam@gmail.com -c user.name="Adam Munawar Rahman" \
+    commit -m "deploy(inference): headless GPU API on L4"
+
+  echo "[deploy.inf] pushing $BRANCH → $REMOTE main ..."
+  git push --force "$REMOTE" "${BRANCH}:main"
+)
+
+git worktree remove --force "$DEPLOY_TMP"
+git branch -D "$BRANCH" 2>/dev/null || true
+echo "[deploy.inf] done. Watch build at: ${URL}"
diff --git a/scripts/deploy_personal_space.sh b/scripts/deploy_personal_space.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d58da1e3d7a39abd16a8265dd089f062c347354e
--- /dev/null
+++ b/scripts/deploy_personal_space.sh
@@ -0,0 +1,156 @@
+#!/usr/bin/env bash
+# Deploy to the personal HF Space (msradam/riprap) only.
+#
+# This script intentionally never touches the lablab Space (which is
+# the AMD-judging artifact). It pushes to the `personal` git remote;
+# if that remote does not exist, it creates it. It also swaps
+# Dockerfile.l4 → Dockerfile in the working tree on the push branch
+# only — the main branch keeps the canonical T4 Dockerfile.
+#
+# Usage:
+#   scripts/deploy_personal_space.sh           # push current HEAD
+#   scripts/deploy_personal_space.sh --setup   # one-time: add remote, set secrets
+
+set -euo pipefail
+
+PERSONAL_REMOTE="personal"
+# IMPORTANT: HF redirects <user>/<name> back to the canonical
+# org Space if the repo doesn't exist on the personal account. So the
+# personal Space MUST have a different repo name (e.g. <name>-mirror
+# or <name>-l4) than the org Space, or this script will push to the
+# org Space and overwrite the official submission. Configure here.
+PERSONAL_URL="https://huggingface.co/spaces/msradam/riprap"
+PERSONAL_BRANCH="hf-personal"
+LABLAB_NAME_PATTERN="AMD-hackathon|lablab-ai"
+
+guard_against_lablab () {
+  # Layer 1: the configured PERSONAL_URL must not contain the org name.
+  if echo "$PERSONAL_URL" | grep -qE "$LABLAB_NAME_PATTERN"; then
+    echo "FATAL: PERSONAL_URL ($PERSONAL_URL) matches the lablab org pattern."
+    exit 1
+  fi
+  # Layer 2: HF's redirect resolution. Follow redirects on the URL
+  # and check the final landing URL too. This is the layer that
+  # catches the <user>/<name> shorthand redirect.
+  final=$(curl -sIL -o /dev/null -w "%{url_effective}" "$PERSONAL_URL")
+  if echo "$final" | grep -qE "$LABLAB_NAME_PATTERN"; then
+    echo "FATAL: PERSONAL_URL ($PERSONAL_URL) redirects to a lablab-org URL"
+    echo "       (resolved to: $final)."
+    echo "       The personal Space must have a repo name that does NOT"
+    echo "       collide with the org Space. Pick a unique name and"
+    echo "       create the Space on HF before re-running this."
+    exit 1
+  fi
+  # Layer 3: configured remotes.
+  for r in $(git remote); do
+    url=$(git remote get-url "$r" 2>/dev/null || echo "")
+    if [ "$r" = "$PERSONAL_REMOTE" ] && echo "$url" | grep -qE "$LABLAB_NAME_PATTERN"; then
+      echo "FATAL: remote '$PERSONAL_REMOTE' points at the lablab Space ($url)."
+      exit 1
+    fi
+  done
+}
+
+if [ "${1:-}" = "--setup" ]; then
+  guard_against_lablab
+  if ! 
git remote | grep -q "^${PERSONAL_REMOTE}$"; then + echo "[deploy] adding remote '$PERSONAL_REMOTE' → $PERSONAL_URL" + git remote add "$PERSONAL_REMOTE" "$PERSONAL_URL" + else + existing=$(git remote get-url "$PERSONAL_REMOTE") + if [ "$existing" != "$PERSONAL_URL" ]; then + echo "FATAL: remote '$PERSONAL_REMOTE' exists but points at $existing" + echo " expected: $PERSONAL_URL" + exit 1 + fi + fi + echo "[deploy] set the following secrets in the personal Space (Settings → Variables and secrets):" + echo " HF_TOKEN " + echo " RIPRAP_LLM_PRIMARY ollama" + echo " RIPRAP_ML_BACKEND remote" + echo " (optional) any GLiNER / embedding HF tokens" + exit 0 +fi + +guard_against_lablab + +if ! git remote | grep -q "^${PERSONAL_REMOTE}$"; then + echo "FATAL: remote '$PERSONAL_REMOTE' is not configured. Run with --setup first." + exit 1 +fi + +# Build a deploy branch with NO history — HF Spaces scans the full +# branch ancestry for binary files and rejects the push if any commit +# anywhere in history contains an unmigrated binary. So we orphan a +# fresh branch from the current tree, prune non-app artifacts, swap +# the Dockerfile and entrypoint, and force-push that single commit. +DEPLOY_TMP="$(git rev-parse --show-toplevel)/.deploy-tmp-l4" +rm -rf "$DEPLOY_TMP" +git worktree add --detach "$DEPLOY_TMP" HEAD + +( + cd "$DEPLOY_TMP" + + # Orphan branch — single commit, no ancestry. + git checkout --orphan "$PERSONAL_BRANCH" + + # Strip artifacts that don't ship to the running Space. Keep + # corpus/ — it's the policy-document RAG corpus the FSM reads at + # runtime, and the Dockerfile COPYs it. + rm -rf slides/ submission/ docs/ pitch/ research/ \ + assets/screenshots/ \ + assets/cover.png assets/cover-*.png assets/cover-v*.png \ + assets/logo-paper@2x.png assets/logo@2x.png \ + assets/video/ \ + ARCHITECTURE.md METHODOLOGY.md RESEARCH.md \ + NOTICE LICENSE \ + tests/ experiments/ \ + Dockerfile.app docker-compose.yml \ + README.md + # Swap Dockerfile + entrypoint to the L4 variants. + cp Dockerfile.l4 Dockerfile + cp entrypoint.l4.sh entrypoint.sh + chmod +x entrypoint.sh + rm -f Dockerfile.l4 entrypoint.l4.sh + + # Minimal Space-facing README with HF Space frontmatter. + cat > README.md <<'README' +--- +title: Riprap NYC (Personal Mirror, L4) +emoji: 🌊 +colorFrom: blue +colorTo: indigo +sdk: docker +pinned: false +short_description: NYC flood-exposure briefings on L4 (self-contained). +--- + +# Riprap — NYC flood-exposure briefings (L4 self-contained mirror) + +This Space is a self-contained mirror of +[`github.com/msradam/riprap-nyc`](https://github.com/msradam/riprap-nyc). + +It runs on a single L4 GPU and co-hosts everything in one container: +Granite 4.1 8B (via Ollama), Prithvi-EO 2.0 NYC-Pluvial, TerraMind +LULC + Buildings LoRAs, and Granite TTM r2 — no external droplet +dependency. Sleeps on idle; first request after sleep takes ~45–60 s +to wake. + +The hackathon submission Space (CPU UI, droplet proxy) lives at +[`AMD-hackathon/riprap-nyc`](https://lablab-ai-amd-developer-hackathon-riprap-nyc.hf.space). + +Apache 2.0. See the GitHub repo for full source, architecture +deep-dive, methodology, and licence map. +README + + git add -A + git -c user.email=msrahmanadam@gmail.com -c user.name="Adam Munawar Rahman" \ + commit -m "deploy(l4): self-contained Riprap mirror" + + echo "[deploy] pushing $PERSONAL_BRANCH → $PERSONAL_REMOTE main ..." 
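+  # The ${PERSONAL_BRANCH}:main refspec publishes the throwaway orphan
+  # branch as the Space's `main` (the branch HF Spaces builds from); the
+  # local branch name never appears on the remote.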
+ git push --force "$PERSONAL_REMOTE" "${PERSONAL_BRANCH}:main" +) + +git worktree remove --force "$DEPLOY_TMP" +git branch -D "$PERSONAL_BRANCH" 2>/dev/null || true +echo "[deploy] done. Watch build at: ${PERSONAL_URL}" diff --git a/scripts/deploy_vllm_space.sh b/scripts/deploy_vllm_space.sh new file mode 100755 index 0000000000000000000000000000000000000000..f16c37d575c6ec5f61a4d46ac72ce0995cfa6c2f --- /dev/null +++ b/scripts/deploy_vllm_space.sh @@ -0,0 +1,105 @@ +#!/usr/bin/env bash +# Deploy the Riprap vLLM Space (msradam/riprap-vllm) — primary inference +# backend, parallel to the Ollama-backed riprap-inference fallback. +# +# Same orphan-branch pattern as the other deploy scripts. + +set -euo pipefail + +REMOTE="vllm" +URL="https://huggingface.co/spaces/msradam/riprap-vllm" +BRANCH="hf-vllm" +LABLAB_NAME_PATTERN="AMD-hackathon|lablab-ai" +SOURCE_DIR="inference-vllm" + +guard_against_lablab () { + if echo "$URL" | grep -qE "$LABLAB_NAME_PATTERN"; then + echo "FATAL: URL ($URL) matches the lablab org pattern." + exit 1 + fi + final=$(curl -sIL -o /dev/null -w "%{url_effective}" "$URL") + if echo "$final" | grep -qE "$LABLAB_NAME_PATTERN"; then + echo "FATAL: URL ($URL) redirects to lablab-org URL ($final)" + exit 1 + fi +} + +if [ "${1:-}" = "--setup" ]; then + guard_against_lablab + if ! git remote | grep -q "^${REMOTE}$"; then + echo "[deploy.vllm] adding remote '$REMOTE' → $URL" + git remote add "$REMOTE" "$URL" + fi + echo "[deploy.vllm] set RIPRAP_PROXY_TOKEN secret on the Space" + exit 0 +fi + +guard_against_lablab + +DEPLOY_TMP="$(git rev-parse --show-toplevel)/.deploy-tmp-vllm" +rm -rf "$DEPLOY_TMP" +git worktree add --detach "$DEPLOY_TMP" HEAD + +( + cd "$DEPLOY_TMP" + git checkout --orphan "$BRANCH" + + rm -rf slides/ submission/ docs/ pitch/ research/ corpus/ \ + assets/ \ + tests/ experiments/ \ + data/ \ + web/ app/ scripts/ \ + inference/ \ + Dockerfile Dockerfile.app Dockerfile.l4 \ + docker-compose.yml entrypoint.sh entrypoint.l4.sh \ + pyproject.toml uv.lock \ + agent.py riprap.py helios_nyc.py \ + ARCHITECTURE.md METHODOLOGY.md RESEARCH.md \ + LICENSE NOTICE README.md requirements*.txt + + mv "${SOURCE_DIR}/Dockerfile" ./Dockerfile + mv "${SOURCE_DIR}/entrypoint.sh" ./entrypoint.sh + mv "${SOURCE_DIR}/proxy.py" ./proxy.py + rmdir "$SOURCE_DIR" 2>/dev/null || true + chmod +x entrypoint.sh + + find services -mindepth 1 -maxdepth 1 -not -name riprap-models -exec rm -rf {} + + find services/riprap-models -mindepth 1 \ + -not -name main.py -not -name requirements.txt -exec rm -rf {} + + + cat > README.md <<'README' +--- +title: Riprap vLLM (Headless GPU API) +emoji: 🌊 +colorFrom: blue +colorTo: indigo +sdk: docker +pinned: false +short_description: vLLM-backed Granite 4.1 8B FP8 + EO stack for Riprap. +--- + +# Riprap vLLM Space + +Primary headless GPU API for [Riprap](https://github.com/msradam/riprap-nyc). +Runs Granite 4.1 8B FP8 via vLLM (OpenAI-compatible) and the +riprap-models specialist service (Prithvi-EO 2.0 NYC-Pluvial, +TerraMind LULC + Buildings, Granite TTM r2, Granite Embedding, +GLiNER) behind a single FastAPI bearer-auth proxy on port 7860. + +A parallel Ollama-backed Space (`msradam/riprap-inference`) serves +the same surface as a fallback when this one is paused or rebuilding. + +Apache 2.0. Source: https://github.com/msradam/riprap-nyc. 
+README + + git add -A + git -c user.email=msrahmanadam@gmail.com -c user.name="Adam Munawar Rahman" \ + commit -m "deploy(vllm): vLLM-backed Granite 4.1 8B FP8 inference Space" + + echo "[deploy.vllm] pushing $BRANCH → $REMOTE main ..." + git push --force "$REMOTE" "${BRANCH}:main" +) + +git worktree remove --force "$DEPLOY_TMP" +git branch -D "$BRANCH" 2>/dev/null || true +echo "[deploy.vllm] done. Watch build at: ${URL}" diff --git a/scripts/dry_run.py b/scripts/dry_run.py new file mode 100644 index 0000000000000000000000000000000000000000..0ce7dfe166545b72ad708f2272913bacb76d6a65 --- /dev/null +++ b/scripts/dry_run.py @@ -0,0 +1,127 @@ +"""Quick end-to-end sanity check. + +Exercises every public route once and prints a summary. Catches: + - 404/500s on routes + - missing static assets + - broken /api/stream or /api/compare SSE + - missing register data + - hallucination drops > N + +Run while the server is up: + python scripts/dry_run.py +""" +from __future__ import annotations + +import json +import sys +import time + +import httpx + +BASE = "http://127.0.0.1:8765" + + +def check(label: str, fn): + t0 = time.time() + try: + ok, detail = fn() + elapsed = time.time() - t0 + marker = "✓" if ok else "✗" + print(f" {marker} {label:42s} ({elapsed:5.2f}s) {detail}") + return ok + except Exception as e: + elapsed = time.time() - t0 + print(f" ✗ {label:42s} ({elapsed:5.2f}s) EXCEPTION: {type(e).__name__}: {e}") + return False + + +def get_status(path: str) -> tuple[bool, str]: + r = httpx.get(BASE + path, timeout=10) + return r.status_code == 200, f"HTTP {r.status_code} ({len(r.content)} bytes)" + + +def stream_one(query: str) -> tuple[bool, str]: + with httpx.stream("GET", BASE + f"/api/stream?q={query}", timeout=120) as r: + if r.status_code != 200: + return False, f"HTTP {r.status_code}" + steps = 0; final = None + for line in r.iter_lines(): + if line.startswith("data: "): + d = json.loads(line[6:]) + if d.get("kind") == "step": steps += 1 + elif d.get("kind") == "final": final = d + if not final: + return False, f"no final event (steps={steps})" + dropped = len((final.get("audit") or {}).get("dropped") or []) + en = final.get("energy") or {} + return True, (f"steps={steps}, dropped={dropped}, " + f"energy={en.get('local_mwh','?')} mWh local") + + +def compare_one(a: str, b: str) -> tuple[bool, str]: + with httpx.stream("GET", BASE + f"/api/compare?a={a}&b={b}", timeout=120) as r: + if r.status_code != 200: + return False, f"HTTP {r.status_code}" + finals = {} + steps = 0 + for line in r.iter_lines(): + if line.startswith("data: "): + d = json.loads(line[6:]) + if d.get("kind") == "step": steps += 1 + elif d.get("kind") == "final": finals[d.get("side")] = d + if "a" not in finals or "b" not in finals: + return False, f"missing final (got {list(finals)})" + return True, f"both sides done; steps={steps}" + + +def register_check(asset_class: str) -> tuple[bool, str]: + r = httpx.get(BASE + f"/api/register/{asset_class}", timeout=10) + if r.status_code == 503: + return False, "register not built" + if r.status_code != 200: + return False, f"HTTP {r.status_code}" + data = r.json() + rows = data.get("rows", []) + tiers = {1: 0, 2: 0, 3: 0} + for r_ in rows: + tiers[r_.get("tier", 0)] = tiers.get(r_.get("tier", 0), 0) + 1 + return True, f"{len(rows)} rows · tier1={tiers.get(1,0)} t2={tiers.get(2,0)} t3={tiers.get(3,0)}" + + +def main(): + print(f"=== Riprap dry-run vs {BASE} ===\n") + + print("[Pages]") + check("/", lambda: get_status("/")) + check("/compare", lambda: get_status("/compare")) + 
check("/register/schools", lambda: get_status("/register/schools")) + check("/register/nycha", lambda: get_status("/register/nycha")) + check("/static/style.css", lambda: get_status("/static/style.css")) + check("/static/app.js", lambda: get_status("/static/app.js")) + check("/static/compare.js", lambda: get_status("/static/compare.js")) + check("/static/register.js",lambda: get_status("/static/register.js")) + fontf = "/static/vendor/nyco/fonts/IBM-Plex-Sans/IBMPlexSans-Regular.woff2" + check(fontf, lambda: get_status(fontf)) + + print("\n[API: layer endpoints]") + check("/api/layers/sandy", lambda: get_status("/api/layers/sandy?lat=40.59&lon=-73.77&r=1500")) + check("/api/layers/dep_extreme_2080", + lambda: get_status("/api/layers/dep_extreme_2080?lat=40.59&lon=-73.77&r=1500")) + check("/api/floodnet_near", lambda: get_status("/api/floodnet_near?lat=40.59&lon=-73.77&r=1000")) + + print("\n[API: register endpoints]") + check("/api/register/schools", lambda: register_check("schools")) + check("/api/register/nycha", lambda: register_check("nycha")) + + print("\n[Streams]") + check("stream · 180 Beach 35 St", + lambda: stream_one("180 Beach 35 St, Queens")) + check("stream · Empire State (cleaner case)", + lambda: stream_one("350 5 Avenue, Manhattan")) + check("compare · Hollis vs Empire State", + lambda: compare_one("153-09 90 Avenue Jamaica Queens", + "350 5 Avenue Manhattan")) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/fetch_ida_hwms.py b/scripts/fetch_ida_hwms.py new file mode 100644 index 0000000000000000000000000000000000000000..499c496bb74a792e6ce4fb1816d3df129cf70783 --- /dev/null +++ b/scripts/fetch_ida_hwms.py @@ -0,0 +1,54 @@ +"""One-shot fetch of NYC Hurricane Ida 2021 high-water marks from USGS STN. + +Output: data/ida_2021_hwms_ny.geojson — point GeoJSON with elev_ft + site +metadata. Used by the Riprap agent's `step_ida_hwm` action as the +empirical post-event flood signal (the same role Prithvi-EO plays for +SAR-derived extents in the parent project). 
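+
+One output feature, roughly (values illustrative, not real STN records):
+
+    {"type": "Feature",
+     "geometry": {"type": "Point", "coordinates": [-73.84, 40.73]},
+     "properties": {"hwm_id": 40123, "elev_ft": 9.1, "hwm_type": "Mud",
+                    "county": "Queens County", ...}}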
+""" +from __future__ import annotations + +import json +import sys +from pathlib import Path + +import httpx + +OUT = Path(__file__).resolve().parent.parent / "data" / "ida_2021_hwms_ny.geojson" +URL = "https://stn.wim.usgs.gov/STNServices/HWMs/FilteredHWMs.json" + + +def main() -> int: + print("fetching USGS STN Ida 2021 NY HWMs...", file=sys.stderr) + r = httpx.get(URL, params={"Event": 312, "States": "NY"}, timeout=60) + r.raise_for_status() + data = r.json() + + features = [] + for d in data: + lat = d.get("latitude"); lon = d.get("longitude") + if lat is None or lon is None: + continue + features.append({ + "type": "Feature", + "geometry": {"type": "Point", "coordinates": [lon, lat]}, + "properties": { + "hwm_id": d.get("hwm_id"), + "site_no": d.get("site_no"), + "elev_ft": d.get("elev_ft"), + "height_above_gnd": d.get("height_above_gnd"), + "hwm_type": d.get("hwmTypeName"), + "hwm_quality": d.get("hwmQualityName"), + "county": d.get("countyName"), + "site_description": d.get("siteDescription"), + "waterbody": d.get("waterbody"), + }, + }) + OUT.parent.mkdir(exist_ok=True, parents=True) + OUT.write_text(json.dumps({"type": "FeatureCollection", "features": features})) + print(f"wrote {len(features)} HWMs -> {OUT} ({OUT.stat().st_size // 1024} KB)", + file=sys.stderr) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/fetch_nyc_dem.py b/scripts/fetch_nyc_dem.py new file mode 100644 index 0000000000000000000000000000000000000000..7655fe5b4a5b65af7ed5d013466dc8acb078bc83 --- /dev/null +++ b/scripts/fetch_nyc_dem.py @@ -0,0 +1,50 @@ +"""One-shot fetch of an NYC-wide DEM for the microtopo specialist. + +Run this once before launching the agent or web UI: + + python scripts/fetch_nyc_dem.py + +Output: data/nyc_dem_30m.tif (~few MB at 30 m, citywide). +We use 30 m resolution for the precomputed tile because at higher +resolution the file gets large and microtopo metrics (200/750 m +windows) don't need 10 m granularity. +""" +from __future__ import annotations + +import sys +import warnings +from pathlib import Path + +warnings.filterwarnings("ignore") + +import py3dep # noqa: E402 + +DATA = Path(__file__).resolve().parent.parent / "data" +OUT = DATA / "nyc_dem_30m.tif" + +# NYC bbox (lon_min, lat_min, lon_max, lat_max) plus a bit of padding +NYC_BBOX = (-74.30, 40.45, -73.65, 40.95) + + +def main() -> int: + if OUT.exists(): + print(f"already exists: {OUT}", file=sys.stderr) + return 0 + DATA.mkdir(exist_ok=True, parents=True) + print(f"fetching NYC DEM @ 30 m for bbox {NYC_BBOX}", file=sys.stderr) + dem = py3dep.get_dem(NYC_BBOX, resolution=30) + print(f" shape: {dem.shape}", file=sys.stderr) + # Reproject to WGS84 if needed + try: + if dem.rio.crs and dem.rio.crs.to_epsg() != 4326: + dem = dem.rio.reproject("EPSG:4326") + print(" reprojected to EPSG:4326", file=sys.stderr) + except Exception: + pass + dem.rio.to_raster(str(OUT), compress="DEFLATE", dtype="float32") + print(f"wrote {OUT} ({OUT.stat().st_size // 1024} KB)", file=sys.stderr) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/probe_50.py b/scripts/probe_50.py new file mode 100644 index 0000000000000000000000000000000000000000..98557242b61adb2865df14dcb0fe15e54500476e --- /dev/null +++ b/scripts/probe_50.py @@ -0,0 +1,277 @@ +"""50-query validation sweep against the live HF Space. 
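+
+The sweep drives /api/agent/stream as a raw SSE client; a query only
+PASSes if a `final` event arrives before `done`. One SSE block looks
+like this (illustrative):
+
+    event: step
+    data: {"step": "floodnet", "ok": true}
+
+which _parse_sse() yields as ("step", {"step": "floodnet", "ok": True}).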
+ +Usage: + python3 scripts/probe_50.py [--base URL] [--concurrency N] [--timeout S] + +Default base: https://lablab-ai-amd-developer-hackathon-riprap-nyc.hf.space +""" + +import argparse +import asyncio +import json +import time +from pathlib import Path +from urllib.parse import quote + +import aiohttp + +BASE = "https://lablab-ai-amd-developer-hackathon-riprap-nyc.hf.space" +QUERIES_FILE = Path("tests/queries_50.json") +RESULTS_FILE = Path("tests/probe_50_results.json") +CONCURRENCY = 3 +TIMEOUT_S = 120 + +STEP_STONE_MAP = { + "sandy_inundation": "sandy", + "dep_stormwater": "dep", + "nyc311": "311", + "floodnet": "floodnet", + "floodnet_forecast": "floodnet", + "noaa_tides": "noaa", + "nws_alerts": "nws", + "nws_obs": "nws", + "microtopo_lidar": "microtopo", + "ida_hwm_2021": "ida", + "ttm_forecast": "ttm", + "ttm_battery_surge": "ttm", + "ttm_311_forecast": "ttm", + "prithvi_eo_v2": "prithvi_v2", + "prithvi_eo_live": "prithvi_live", + "gliner_extract": "gliner", + "rag_granite_embedding": "rag", + "mellea_reconcile_address": "mellea", + "geocode": None, + "mta_entrance_exposure": "mta", + "terramind_synthesis": "terramind", +} + + +def _parse_sse(chunk: str): + events = [] + event_type = "message" + data_lines = [] + for line in chunk.splitlines(): + if line.startswith("event:"): + event_type = line[6:].strip() + elif line.startswith("data:"): + data_lines.append(line[5:].strip()) + elif line == "" and data_lines: + raw = " ".join(data_lines) + try: + payload = json.loads(raw) + except json.JSONDecodeError: + payload = {"raw": raw} + events.append((event_type, payload)) + event_type = "message" + data_lines = [] + return events + + +async def stream_query(session: aiohttp.ClientSession, query_obj: dict, base: str, timeout_s: float) -> dict: + qid = query_obj["id"] + query = query_obj["query"] + url = f"{base}/api/agent/stream?q={quote(query)}" + + result = { + "id": qid, + "query": query, + "status": "ERROR", + "wall_clock_s": None, + "intent_returned": None, + "mellea_passed": None, + "mellea_rerolls": 0, + "stones_fired": [], + "stones_errored": [], + "stones_silent": [], + "citations_resolved": None, + "compare_targets": None, + "error": None, + } + + t0 = time.monotonic() + buf = "" + plan_seen = False + final_seen = False + + try: + async with session.get(url, timeout=aiohttp.ClientTimeout(total=timeout_s + 10)) as resp: + if resp.status != 200: + result["error"] = f"HTTP {resp.status}" + result["wall_clock_s"] = round(time.monotonic() - t0, 2) + return result + + deadline = t0 + timeout_s + async for chunk in resp.content.iter_any(): + if time.monotonic() > deadline: + result["status"] = "TIMEOUT" + result["wall_clock_s"] = round(time.monotonic() - t0, 2) + return result + + buf += chunk.decode("utf-8", errors="replace") + # process complete SSE blocks (separated by double-newline) + while "\n\n" in buf: + block, buf = buf.split("\n\n", 1) + for evt_type, payload in _parse_sse(block + "\n\n"): + if evt_type == "plan": + plan_seen = True + result["intent_returned"] = payload.get("intent") + targets = payload.get("targets", []) + if result["intent_returned"] == "compare": + result["compare_targets"] = len(targets) + + elif evt_type == "step": + step = payload.get("step", "") + ok = payload.get("ok") + if step in STEP_STONE_MAP and STEP_STONE_MAP[step]: + stone = STEP_STONE_MAP[step] + if ok is True: + if stone not in result["stones_fired"]: + result["stones_fired"].append(stone) + elif ok is False: + if stone not in result["stones_errored"]: + 
result["stones_errored"].append(stone) + + elif evt_type == "final": + final_seen = True + mellea = payload.get("mellea") or {} + req_passed = len(mellea.get("requirements_passed") or []) + req_total = mellea.get("requirements_total") or 4 + result["mellea_passed"] = f"{req_passed}/{req_total}" + result["mellea_rerolls"] = (mellea.get("rerolls") or 0) + audit = payload.get("audit") or {} + result["citations_resolved"] = audit.get("citations_resolved") + + elif evt_type == "error": + result["error"] = payload.get("err", "unknown error") + + elif evt_type == "done": + result["wall_clock_s"] = round(time.monotonic() - t0, 2) + if final_seen: + result["status"] = "PASS" + else: + result["status"] = "ERROR" + if not result["error"]: + result["error"] = "done without final event" + return result + + except asyncio.TimeoutError: + result["status"] = "TIMEOUT" + except Exception as exc: + result["status"] = "ERROR" + result["error"] = str(exc) + + result["wall_clock_s"] = round(time.monotonic() - t0, 2) + return result + + +async def run_all(queries: list, base: str, timeout_s: float, concurrency: int) -> list: + sem = asyncio.Semaphore(concurrency) + results = [] + early_stop = False + + connector = aiohttp.TCPConnector(limit=concurrency + 2) + async with aiohttp.ClientSession(connector=connector) as session: + + async def bounded(qobj): + nonlocal early_stop + if early_stop: + return {**qobj, "status": "SKIPPED", "wall_clock_s": None, "error": "early stop"} + async with sem: + r = await stream_query(session, qobj, base, timeout_s) + tag = f"[{r['id']}]" + wc = f"{r['wall_clock_s']:.1f}s" if r["wall_clock_s"] else "?" + mel = r.get("mellea_passed") or "-" + rr = r.get("mellea_rerolls") or 0 + print(f"{tag} {r['status']} {wc} mellea={mel} rerolls={rr}", flush=True) + return r + + tasks = [asyncio.create_task(bounded(q)) for q in queries] + + done_count = 0 + for coro in asyncio.as_completed(tasks): + r = await coro + results.append(r) + done_count += 1 + # Check early-stop: >10 failures in first 20 + if done_count <= 20: + bad = sum(1 for x in results if x["status"] in ("TIMEOUT", "ERROR")) + if bad > 10: + print(f"\nEARLY STOP: {bad} failures in first {done_count} queries — Space appears degraded.", flush=True) + early_stop = True + + # Sort by original query order + id_order = {q["id"]: i for i, q in enumerate(queries)} + results.sort(key=lambda r: id_order.get(r["id"], 999)) + return results + + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument("--base", default=BASE) + ap.add_argument("--concurrency", type=int, default=CONCURRENCY) + ap.add_argument("--timeout", type=float, default=TIMEOUT_S) + args = ap.parse_args() + + queries = json.loads(QUERIES_FILE.read_text()) + print(f"Running {len(queries)} queries against {args.base} (concurrency={args.concurrency}, timeout={args.timeout}s)\n", flush=True) + + results = asyncio.run(run_all(queries, args.base, args.timeout, args.concurrency)) + + # Write results + RESULTS_FILE.write_text(json.dumps(results, indent=2)) + print(f"\nResults written to {RESULTS_FILE}") + + # Update verified flags in queries file + passed_ids = {r["id"] for r in results if r["status"] == "PASS"} + for q in queries: + if q["id"] in passed_ids: + q["verified"] = True + QUERIES_FILE.write_text(json.dumps(queries, indent=2)) + print(f"Updated verified flags in {QUERIES_FILE}") + + # Summary + total = len(results) + passed = sum(1 for r in results if r["status"] == "PASS") + timed_out = sum(1 for r in results if r["status"] == "TIMEOUT") + errored = sum(1 for r in 
results if r["status"] == "ERROR") + skipped = sum(1 for r in results if r["status"] == "SKIPPED") + wall_clocks = [r["wall_clock_s"] for r in results if r["status"] == "PASS" and r["wall_clock_s"]] + avg_wall = sum(wall_clocks) / len(wall_clocks) if wall_clocks else 0 + max_wall = max(wall_clocks) if wall_clocks else 0 + mellea_perfect = sum(1 for r in results if r.get("mellea_passed") == "4/4") + + print(f"\n{'='*60}") + print(f"Total: {total}") + print(f"PASS: {passed} ({100*passed//total if total else 0}%)") + print(f"TIMEOUT: {timed_out}") + print(f"ERROR: {errored}") + if skipped: + print(f"SKIPPED: {skipped} (early stop)") + print(f"Avg wall-clock: {avg_wall:.1f}s (passing queries)") + print(f"Max wall-clock: {max_wall:.1f}s") + print(f"Mellea 4/4: {mellea_perfect} ({100*mellea_perfect//total if total else 0}%)") + + failures = [r for r in results if r["status"] != "PASS"] + if failures: + print("\n--- FAILURES ---") + for r in failures: + print(f" [{r['id']}] {r['status']} — {r['query'][:60]}") + if r.get("error"): + print(f" err: {r['error'][:80]}") + + slowest = sorted([r for r in results if r.get("wall_clock_s")], key=lambda x: x["wall_clock_s"], reverse=True)[:5] + print("\n--- SLOWEST 5 ---") + for r in slowest: + print(f" [{r['id']}] {r['wall_clock_s']:.1f}s — {r['query'][:60]}") + + high_rr = [r for r in results if (r.get("mellea_rerolls") or 0) > 1] + if high_rr: + print("\n--- HIGH REROLLS (>1) ---") + for r in high_rr: + print(f" [{r['id']}] rerolls={r['mellea_rerolls']} — {r['query'][:60]}") + + print(f"{'='*60}") + + +if __name__ == "__main__": + main() diff --git a/scripts/probe_addresses.py b/scripts/probe_addresses.py new file mode 100644 index 0000000000000000000000000000000000000000..e450f9ae30205f6ec6ddc7c36a74d49935736604 --- /dev/null +++ b/scripts/probe_addresses.py @@ -0,0 +1,432 @@ +"""Riprap end-to-end address test suite. + +Drives `/api/agent/stream` against a curated set of NYC addresses and +asserts that every Stone fires (or fails to fire with a deterministic +reason), the briefing prose contains all four sections, Mellea +grounding passes within attempt budget, and no specialist crashes with +an internal-API error (PreTrainedModel ModuleNotFoundError, etc). + +Designed to be runnable both locally (M3 → laptop) and against the +deployed HF Space. The remote ML stack on the AMD MI300X is the same in +both cases when the env is configured, so an address that passes here +is the same address the hackathon judges will see. + +Usage: + .venv/bin/python scripts/probe_addresses.py + .venv/bin/python scripts/probe_addresses.py --base http://127.0.0.1:7860 + .venv/bin/python scripts/probe_addresses.py \\ + --base https://lablab-ai-amd-developer-hackathon-riprap-nyc.hf.space \\ + --addresses "PS 188, Lower East Side" + .venv/bin/python scripts/probe_addresses.py --json outputs/probe_addresses.json + +Exit code 0 if every address passes every assertion; 1 otherwise. CSV +goes to outputs/probe_addresses.csv; JSON dump (full payloads, useful +for the UI dev loop) optionally to --json. +""" +from __future__ import annotations + +import argparse +import csv +import json +import sys +import time +from collections import defaultdict +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any +from urllib.parse import quote + +import httpx + +# Curated probe set. Each entry exercises a different surface of the +# system; together they cover every Stone's specialists at least once. 
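+# Spec keys: `query` is required; `intent` and the `expect_*` fields are
+# asserted by assert_run() below; `borough` is informational only here.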
+DEFAULT_ADDRESSES: list[dict[str, Any]] = [ + # Anchor each entry on a fully-qualified street address so the + # geocoder doesn't drift to a same-named landmark in another borough + # (e.g. there are several "PS 188" schools city-wide). + { + "query": "442 East Houston Street, Manhattan", # PS 188 LES + "intent": "single_address", + "expect_sandy": True, # in the empirical 2012 extent + "expect_311_ge": 1, + "borough": "Manhattan", + }, + { + "query": "80 Pioneer Street, Brooklyn", + "intent": "single_address", + "expect_sandy": True, # Red Hook — canonical Sandy turf + "expect_311_ge": 1, + "expect_terramind_lulc_polygons": True, # EO map-layer wiring check + "borough": "Brooklyn", + }, + { + "query": "100 Gold Street, Manhattan", + "intent": "single_address", + # Outside Sandy 2012; this is the negative-control address. + "expect_sandy": False, + "borough": "Manhattan", + }, + { + "query": "Hollis, Queens", + "intent": "neighborhood", + "borough": "Queens", + }, + { + "query": "Coney Island, Brooklyn", + "intent": "neighborhood", + # neighborhood intent doesn't surface a per-address sandy field + # in the final state — the briefing prose names the Sandy + # exposure narratively from RAG + DEP layers. + "borough": "Brooklyn", + }, +] + + +@dataclass +class StoneSummary: + fired: int = 0 + errored: int = 0 + silent: int = 0 + total_seen: int = 0 + + +@dataclass +class RunResult: + query: str + elapsed_s: float = 0.0 + intent: str | None = None + paragraph: str = "" + n_steps: int = 0 + steps: list[dict[str, Any]] = field(default_factory=list) + final: dict[str, Any] = field(default_factory=dict) + attempts: list[dict[str, Any]] = field(default_factory=list) + stones: dict[str, StoneSummary] = field(default_factory=lambda: defaultdict(StoneSummary)) + errors: list[str] = field(default_factory=list) + error_steps: list[str] = field(default_factory=list) + + +# Mapping mirrors web/sveltekit/src/lib/client/cardAdapter.ts:stoneForStep. +# Kept here so the probe doesn't need to read the bundled JS. 
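+# Steps that are plumbing rather than evidence (e.g. geocode) map to None
+# and are excluded from the per-Stone tallies.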
+def _stone_for_step(step: str) -> str | None: + n = (step or "").lower() + if n in {"sandy_inundation", "dep_stormwater", "ida_hwm_2021", + "prithvi_eo_v2", "microtopo_lidar"}: + return "cornerstone" + if n in {"mta_entrance_exposure", "nycha_development_exposure", + "doe_school_exposure", "doh_hospital_exposure", + "terramind_synthesis", "terramind_buildings", "eo_chip_fetch"}: + return "keystone" + if n in {"floodnet", "nyc311", "nws_obs", "noaa_tides", + "prithvi_eo_live", "terramind_lulc"}: + return "touchstone" + if n in {"nws_alerts", "ttm_forecast", "ttm_311_forecast", + "floodnet_forecast", "ttm_battery_surge"}: + return "lodestone" + if n.startswith("reconcile") or n.startswith("mellea") or \ + n in {"rag_granite_embedding", "gliner_extract"}: + return "capstone" + return None + + +def stream_one(query: str, base: str, timeout_s: float) -> RunResult: # TODO(cleanup): cc-grade-D (21) + """Drive one SSE run, accumulate every event into a RunResult.""" + url = f"{base}/api/agent/stream?q={quote(query)}" + res = RunResult(query=query) + t0 = time.time() + paragraph = "" + + with httpx.stream("GET", url, timeout=timeout_s) as r: + r.raise_for_status() + ev = None + buf: list[str] = [] + for line in r.iter_lines(): + if line.startswith("event:"): + ev = line.split(":", 1)[1].strip() + elif line.startswith("data:"): + buf.append(line[5:].lstrip()) + elif line == "": + if not (ev and buf): + ev = None + buf = [] + continue + data = "\n".join(buf) + buf = [] + try: + payload = json.loads(data) + except json.JSONDecodeError: + payload = {"_raw": data} + if ev == "plan": + res.intent = payload.get("intent") + elif ev == "step": + res.n_steps += 1 + res.steps.append(payload) + stone = _stone_for_step(payload.get("step", "")) + if stone: + s = res.stones[stone] + s.total_seen += 1 + if not payload.get("ok"): + s.errored += 1 + res.error_steps.append(payload.get("step", "")) + elif payload.get("result") is None and payload.get("err") is None: + s.silent += 1 + else: + s.fired += 1 + elif ev == "token": + paragraph += payload.get("delta") or "" + elif ev == "mellea_attempt": + res.attempts.append(payload) + elif ev == "final": + res.final = payload + if isinstance(payload.get("paragraph"), str): + paragraph = payload["paragraph"] + elif ev == "error": + res.errors.append(str(payload.get("err") or payload)) + ev = None + res.elapsed_s = round(time.time() - t0, 2) + res.paragraph = paragraph + return res + + +# ---- Assertions ---------------------------------------------------------- + +# Flag step-result errors that look like the local-fallback ModuleNotFoundError +# we just hardened against. If any address surfaces this string, the +# guard-rail regressed. +_ERROR_REGRESSIONS = ( + "ModuleNotFoundError", + "Could not import module 'PreTrainedModel'", +) + +# Briefing section headings the system prompt teaches Granite to emit. +# Granite's exact rendering varies per attempt — sometimes +# `**Status.**` on its own line, sometimes inline. We treat each section +# as present if its label appears at all (case-insensitive). +# +# The system prompt says "Omit any section whose supporting facts are +# absent from the documents" — so on a query with no RAG hits the +# Policy-context section is correctly skipped. We require Status + +# Empirical evidence + Modeled scenarios always; Policy context is +# best-effort. 
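+# Presence is a case-insensitive substring test, so "**Status.**",
+# "Status:" and an inline mention all count as the section being there.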
+_REQUIRED_HEADINGS = ( + "Status", + "Empirical evidence", + "Modeled scenarios", +) +_OPTIONAL_HEADINGS = ("Policy context",) + + +def assert_run(spec: dict[str, Any], r: RunResult) -> list[str]: # TODO(cleanup): cc-grade-F (42) + """Return a list of failures (empty list if the run passes).""" + fails: list[str] = [] + if r.errors: + fails.append(f"stream errors: {r.errors}") + + # Specialist regressions — the LOD-002/003/004 ModuleNotFoundError + # category. If any step result string contains those keywords we + # treat it as a hard regression of the pre-import hardening. + for step in r.steps: + err = step.get("err") or "" + for marker in _ERROR_REGRESSIONS: + if marker in str(err): + fails.append( + f"{step.get('step')}: {marker} regressed in step error" + ) + + # Intent classification. + expected_intent = spec.get("intent") + if expected_intent and r.intent and r.intent != expected_intent: + fails.append(f"intent={r.intent} expected {expected_intent}") + + # Briefing presence. + if not r.paragraph or len(r.paragraph) < 200: + fails.append(f"briefing too short: {len(r.paragraph)} chars") + else: + para_lower = r.paragraph.lower() + for heading in _REQUIRED_HEADINGS: + if heading.lower() not in para_lower: + fails.append(f"briefing missing heading {heading!r}") + + # Mellea grounding. + final = r.final or {} + m = final.get("mellea") or {} + passed = m.get("requirements_passed") or [] + total = m.get("requirements_total") or 0 + if total: + if len(passed) < total: + failed_names = ",".join(m.get("requirements_failed") or []) or "?" + fails.append( + f"mellea: only {len(passed)}/{total} grounding checks passed " + f"(failed: {failed_names})" + ) + elif r.attempts: + last = r.attempts[-1] + if last.get("failed"): + fails.append(f"mellea: last attempt failed {last['failed']}") + + # Stones — the per-stone requirement is intent-dependent. The + # single_address FSM fires every stone's specialists (Cornerstone / + # Keystone / Touchstone / Lodestone). The neighborhood and + # development_check intents have a smaller fixed surface that does + # not exercise the address-level register / live-now stones — they + # rely on RAG + a smaller set of specialists. So we only enforce + # the full Stone roster for single_address; for the others we just + # check Capstone fires (RAG / GLiNER / reconcile are universal). + intent = (r.intent or expected_intent or "single_address").lower() + if intent == "single_address": + for stone in ("cornerstone", "touchstone", "lodestone"): + s = r.stones.get(stone) + if not s or s.fired == 0: + fails.append( + f"{stone}: 0 specialists fired " + f"(saw {s.total_seen if s else 0})" + ) + s = r.stones.get("keystone") + if not s or s.total_seen == 0: + fails.append("keystone: no specialists attempted") + s = r.stones.get("capstone") + if not s or s.fired == 0: + fails.append( + f"capstone: 0 fired — reconcile/rag/gliner step events missing " + f"(saw {s.total_seen if s else 0})" + ) + + # Spec-driven asserts (only meaningful for single_address — the + # neighborhood / development_check intents have no per-address + # sandy / 311 fields in the final state). 
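+    # Note the strict `is True` below: a missing or null sandy field reads
+    # as not-flooded, which is exactly what expect_sandy=False asserts for
+    # the negative-control address.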
+ if intent == "single_address": + sandy_state = (final.get("sandy") is True) + if "expect_sandy" in spec: + want = spec["expect_sandy"] + if sandy_state is not want: + fails.append(f"sandy={sandy_state} expected {want}") + n311 = (final.get("nyc311") or {}).get("n") or 0 + if "expect_311_ge" in spec and n311 < spec["expect_311_ge"]: + fails.append(f"nyc311={n311} expected >= {spec['expect_311_ge']}") + + # EO map-layer wiring check: TerraMind LULC must produce polygons + # when its specialist fires (ok=True). Prithvi and TerraMind + # Buildings are accepted as silent — no pluvial flood / no + # building change is a valid result, not a bug. This catches the + # regression where specialists fire but polygons_geojson is + # dropped before reaching the final state. + if spec.get("expect_terramind_lulc_polygons"): + tm = final.get("terramind") or {} + if not tm.get("ok"): + fails.append("terramind_lulc: ok=False — LULC specialist did not fire") + else: + n_poly = len((tm.get("polygons_geojson") or {}).get("features") or []) + if n_poly == 0: + fails.append("terramind_lulc: ok=True but 0 polygons in final state") + + return fails + + +# ---- Entry point --------------------------------------------------------- + +def main() -> int: # TODO(cleanup): cc-grade-D (28) + ap = argparse.ArgumentParser() + ap.add_argument("--base", default="http://127.0.0.1:7860", + help="Riprap server base URL") + ap.add_argument("--addresses", default="", + help="Pipe-separated subset of queries to run " + "(addresses themselves contain commas, so pipe is " + "the separator); default runs the full curated set") + ap.add_argument("--timeout", type=float, default=600.0) + ap.add_argument("--out", default="outputs/probe_addresses.csv") + ap.add_argument("--json", default="", + help="Optional path to dump full per-address JSON payload") + args = ap.parse_args() + + if args.addresses: + wanted = {a.strip() for a in args.addresses.split("|") if a.strip()} + specs = [s for s in DEFAULT_ADDRESSES if s["query"] in wanted] + if not specs: + specs = [{"query": q} for q in wanted] + else: + specs = list(DEFAULT_ADDRESSES) + + Path(args.out).parent.mkdir(parents=True, exist_ok=True) + + summary_rows: list[dict[str, Any]] = [] + full: list[dict[str, Any]] = [] + all_pass = True + + print(f"Probing {len(specs)} addresses against {args.base}") + print() + + for i, spec in enumerate(specs, 1): + q = spec["query"] + print(f"[{i}/{len(specs)}] {q!r:50s}", end=" ", flush=True) + try: + r = stream_one(q, args.base, args.timeout) + except Exception as e: + print(f"STREAM ERROR: {type(e).__name__}: {e}") + summary_rows.append({"query": q, "ok": False, + "fails": f"stream raised: {e}"}) + all_pass = False + continue + fails = assert_run(spec, r) + ok = not fails + all_pass &= ok + m = (r.final or {}).get("mellea") or {} + passed = m.get("requirements_passed") or [] + rerolls = m.get("rerolls") if m.get("rerolls") is not None else \ + (max(0, (m.get("n_attempts") or 1) - 1)) + verdict = "PASS" if ok else "FAIL" + print(f"{verdict} {r.elapsed_s:6.1f}s " + f"steps={r.n_steps} prose={len(r.paragraph)}c " + f"mellea={len(passed)}/{m.get('requirements_total') or '?'} " + f"rerolls={rerolls}") + for f in fails: + print(f" - {f}") + + summary_rows.append({ + "query": q, "ok": ok, "elapsed_s": r.elapsed_s, "intent": r.intent, + "n_steps": r.n_steps, + "para_chars": len(r.paragraph), + "mellea_passed": len(passed), + "mellea_total": m.get("requirements_total") or 0, + "rerolls": rerolls, + "stones_fired": ",".join( + f"{k}={v.fired}" for k, v in 
sorted(r.stones.items())), + "stones_errored": ",".join( + f"{k}={v.errored}" for k, v in sorted(r.stones.items()) + if v.errored), + "errored_steps": ",".join(r.error_steps), + "fails": " | ".join(fails), + }) + full.append({ + "spec": spec, + "elapsed_s": r.elapsed_s, + "intent": r.intent, + "paragraph": r.paragraph, + "stones": {k: vars(v) for k, v in r.stones.items()}, + "mellea": m, + "attempts": r.attempts, + "errors": r.errors, + "error_steps": r.error_steps, + "fails": fails, + }) + + out_path = Path(args.out) + if summary_rows: + with out_path.open("w", newline="") as f: + w = csv.DictWriter(f, fieldnames=list(summary_rows[0].keys())) + w.writeheader() + w.writerows(summary_rows) + print(f"\nWrote {out_path}") + if args.json: + json_path = Path(args.json) + json_path.parent.mkdir(parents=True, exist_ok=True) + json_path.write_text(json.dumps(full, indent=2, default=str)) + print(f"Wrote {json_path}") + + print() + print("=" * 70) + print(f" {sum(1 for r in summary_rows if r.get('ok'))}/{len(summary_rows)} addresses passed") + print("=" * 70) + return 0 if all_pass else 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/probe_benchmarks.py b/scripts/probe_benchmarks.py new file mode 100644 index 0000000000000000000000000000000000000000..8dd113d8326d27c8920d7a38bead9672ef9fa725 --- /dev/null +++ b/scripts/probe_benchmarks.py @@ -0,0 +1,222 @@ +"""Collect per-query benchmark data from the live lablab UI. + +Runs each query through `/api/agent/stream`, accumulates the full +SSE trace, and emits a JSON record per query with everything the +benchmark page (docs/BENCHMARKS.md) needs: + + - briefing paragraph + - per-Stone fired count (Cornerstone / Keystone / Touchstone / + Lodestone / Capstone) + - by-design / errored skip rows + - Mellea attempts, rerolls, requirements passed/failed + - emissions: total Wh, J, tokens, n_measured, by-kind / by-hardware + - wall-clock start-to-final + - geocode (lat/lon, BBL, BIN) + +Output: JSON written to outputs/benchmarks.json (or `--out`). + +Usage: + PYTHONPATH=. uv run python scripts/probe_benchmarks.py + PYTHONPATH=. uv run python scripts/probe_benchmarks.py \\ + --queries "80 Pioneer Street, Brooklyn" "2508 Beach Channel Drive" + +Defaults to the canonical four addresses from CLAUDE.md. 
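+
+One run record is shaped roughly like (abridged; values illustrative):
+
+    {"query": "80 Pioneer Street, Brooklyn", "wallclock_s": 38.4,
+     "stones": {"Cornerstone": {"n_fired": 5, "steps": [...]}, ...},
+     "errored": [...], "skipped_by_design": [...],
+     "mellea": {"n_attempts": 1, "rerolls": 0, ...},
+     "emissions": {"total_wh": ..., "by_hardware": {...}},
+     "paragraph": "..."}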
+""" +from __future__ import annotations + +import argparse +import json +import sys +import time +from pathlib import Path +from urllib.parse import quote + +import httpx + +DEFAULT_BASE = "https://lablab-ai-amd-developer-hackathon-riprap-nyc.hf.space" +DEFAULT_QUERIES = [ + "80 Pioneer Street, Brooklyn", + "2508 Beach Channel Drive, Queens", + "Coney Island I Houses, Brooklyn", + "Carleton Manor Houses, Queens", +] + +STEP_TO_STONE: dict[str, str] = { + "sandy_inundation": "Cornerstone", "dep_stormwater": "Cornerstone", + "ida_hwm_2021": "Cornerstone", "prithvi_eo_v2": "Cornerstone", + "microtopo_lidar": "Cornerstone", "sandy_nta": "Cornerstone", + "dep_extreme_2080_nta": "Cornerstone", "dep_moderate_2050_nta": "Cornerstone", + "dep_moderate_current_nta": "Cornerstone", "microtopo_nta": "Cornerstone", + "mta_entrance_exposure": "Keystone", + "nycha_development_exposure": "Keystone", + "doe_school_exposure": "Keystone", "doh_hospital_exposure": "Keystone", + "terramind_synthesis": "Keystone", "eo_chip_fetch": "Keystone", + "terramind_buildings": "Keystone", + "floodnet": "Touchstone", "nyc311": "Touchstone", + "nws_obs": "Touchstone", "noaa_tides": "Touchstone", + "prithvi_eo_live": "Touchstone", "terramind_lulc": "Touchstone", + "nyc311_nta": "Touchstone", + "nws_alerts": "Lodestone", "ttm_forecast": "Lodestone", + "ttm_311_forecast": "Lodestone", "floodnet_forecast": "Lodestone", + "ttm_battery_surge": "Lodestone", + "reconcile_granite41": "Capstone", + "mellea_reconcile_address": "Capstone", + "reconcile_neighborhood": "Capstone", + "reconcile_development": "Capstone", + "reconcile_live_now": "Capstone", +} + + +def stream_events(base: str, q: str, timeout_s: float): + url = f"{base.rstrip('/')}/api/agent/stream?q={quote(q)}" + with httpx.Client(timeout=timeout_s) as client: + with client.stream("GET", url) as r: + r.raise_for_status() + event = None + for line in r.iter_lines(): + if not line: + event = None + continue + if line.startswith("event:"): + event = line.removeprefix("event:").strip() + elif line.startswith("data:") and event: + body = line.removeprefix("data:").strip() + try: + yield event, json.loads(body) + except Exception: + yield event, {"_raw": body} + + +def collect_one(base: str, q: str, timeout_s: float) -> dict: + print(f"\n== {q!r} ==", flush=True) + t0 = time.time() + fired: dict[str, list[str]] = {s: [] for s in + ("Cornerstone", "Keystone", "Touchstone", + "Lodestone", "Capstone")} + errored: list[dict] = [] + skipped: list[dict] = [] + final: dict | None = None + plan: dict | None = None + n_token_events = 0 + + for event, payload in stream_events(base, q, timeout_s): + if event == "plan": + plan = payload + elif event == "token": + n_token_events += 1 + elif event == "step": + step = payload.get("step", "") + ok = bool(payload.get("ok")) + stone = STEP_TO_STONE.get(step) + if stone and ok: + fired[stone].append(step) + elif not ok: + err = (payload.get("err") or + (payload.get("result") or {}).get("err") or + (payload.get("result") or {}).get("skipped") or "") + row = {"step": step, "stone": stone, "reason": err, + "elapsed_s": payload.get("elapsed_s")} + # Heuristic: by-design skips use neutral language; + # genuine errors usually contain a Python exception type. 
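+                # An unmatched neutral skip is misfiled as an error (noisy
+                # but conservative); extend the phrase list whenever a
+                # specialist grows a new skip message.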
+ blob = err.lower() + is_design_skip = any(p in blob for p in [ + "no entrances within radius", + "only 2 historical", + "no schools within radius", + "no nycha", + "no hospitals within radius", + "out of nyc scope", + "not in nyc pluto", + ]) + if is_design_skip: + skipped.append(row) + else: + errored.append(row) + elif event == "final": + final = payload + + elapsed_s = round(time.time() - t0, 2) + print(f" {elapsed_s}s · token events={n_token_events}", flush=True) + + em = (final or {}).get("emissions") or {} + mel = (final or {}).get("mellea") or {} + geo = (final or {}).get("geocode") or {} + return { + "query": q, + "wallclock_s": elapsed_s, + "n_token_events": n_token_events, + "geocode": { + "address": geo.get("address"), + "lat": geo.get("lat"), + "lon": geo.get("lon"), + "bbl": geo.get("bbl"), + "bin": geo.get("bin"), + "borough": geo.get("borough"), + }, + "plan": { + "intent": (plan or {}).get("intent"), + "specialists": (plan or {}).get("specialists"), + "rationale": (plan or {}).get("rationale"), + }, + "stones": { + stone: {"n_fired": len(steps), "steps": steps} + for stone, steps in fired.items() + }, + "errored": errored, + "skipped_by_design": skipped, + "mellea": { + "n_attempts": mel.get("n_attempts"), + "rerolls": mel.get("rerolls"), + "requirements_passed": mel.get("requirements_passed"), + "requirements_failed": mel.get("requirements_failed"), + "requirements_total": mel.get("requirements_total"), + "model": mel.get("model"), + }, + "emissions": { + "n_calls": em.get("n_calls"), + "n_measured": em.get("n_measured"), + "total_wh": em.get("total_wh"), + "total_mwh": em.get("total_mwh"), + "total_joules": em.get("total_joules"), + "total_duration_s": em.get("total_duration_s"), + "tokens": em.get("tokens"), + "by_kind": em.get("by_kind"), + "by_hardware": em.get("by_hardware"), + }, + "paragraph": (final or {}).get("paragraph"), + "paragraph_chars": len((final or {}).get("paragraph") or ""), + "tier": (final or {}).get("tier"), + } + + +def main() -> int: + p = argparse.ArgumentParser() + p.add_argument("--base", default=DEFAULT_BASE) + p.add_argument("--queries", nargs="*", default=DEFAULT_QUERIES) + p.add_argument("--timeout", type=float, default=600.0) + p.add_argument("--out", default="outputs/benchmarks.json") + args = p.parse_args() + + out_path = Path(args.out) + out_path.parent.mkdir(parents=True, exist_ok=True) + + print(f"== probe_benchmarks ==") + print(f" base : {args.base}") + print(f" queries: {len(args.queries)}") + + runs = [] + for q in args.queries: + try: + runs.append(collect_one(args.base, q, args.timeout)) + except Exception as e: + print(f" FAIL {type(e).__name__}: {e}", flush=True) + runs.append({"query": q, "error": f"{type(e).__name__}: {e}"}) + + out = {"base": args.base, "ts": time.time(), "runs": runs} + out_path.write_text(json.dumps(out, indent=2, default=str)) + print(f"\nwrote {out_path} ({len(runs)} runs)") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/probe_mellea.py b/scripts/probe_mellea.py new file mode 100644 index 0000000000000000000000000000000000000000..d0adafa460bcae22d17922c0d8b423ac5e0b18ab --- /dev/null +++ b/scripts/probe_mellea.py @@ -0,0 +1,127 @@ +"""Programmatic Mellea probe — hit the agent stream N times and dump +per-requirement pass/fail to a CSV so we can see which invariant keeps +failing and decide how to fix. + +Requires the local server running on http://127.0.0.1:7860. 
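+
+Each CSV row carries per-attempt failure columns (attemptN_failed) plus
+the final paragraph, so a recurring invariant failure can be read
+straight out of the spreadsheet.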
+ +Usage: + uv run python scripts/probe_mellea.py --query Hollis --runs 5 +""" +from __future__ import annotations + +import argparse +import csv +import json +import time +from pathlib import Path +from urllib.parse import quote + +import httpx + + +def stream_one(query: str, base: str, timeout_s: float) -> dict: + url = f"{base}/api/agent/stream?q={quote(query)}" + t0 = time.time() + final = None + intent = None + attempts = [] # list of {attempt, passed, failed} from mellea_attempt + with httpx.stream("GET", url, timeout=timeout_s) as r: + r.raise_for_status() + ev = None + buf = [] + for line in r.iter_lines(): + if line.startswith("event:"): + ev = line.split(":", 1)[1].strip() + elif line.startswith("data:"): + buf.append(line[5:].lstrip()) + elif line == "": + if ev and buf: + data = "\n".join(buf) + buf = [] + if ev == "plan": + try: + intent = json.loads(data).get("intent") + except json.JSONDecodeError: + pass + elif ev == "mellea_attempt": + try: + attempts.append(json.loads(data)) + except json.JSONDecodeError: + pass + elif ev == "final": + try: + final = json.loads(data) + except json.JSONDecodeError: + final = {"_raw": data} + ev = None + dt = round(time.time() - t0, 2) + return {"final": final or {}, "elapsed_s": dt, "intent": intent, + "attempts": attempts} + + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument("--query", required=True) + ap.add_argument("--runs", type=int, default=5) + ap.add_argument("--base", default="http://127.0.0.1:7860") + ap.add_argument("--timeout", type=float, default=600.0) + ap.add_argument("--out", default="outputs/mellea_probe.csv") + args = ap.parse_args() + + out = Path(args.out) + out.parent.mkdir(parents=True, exist_ok=True) + + rows = [] + for i in range(args.runs): + try: + r = stream_one(args.query, args.base, args.timeout) + except Exception as e: + print(f"[{i+1}/{args.runs}] ERROR: {e!r}") + continue + f = r["final"] + m = f.get("mellea") or {} + passed = m.get("requirements_passed", []) + failed = m.get("requirements_failed", []) + para = f.get("paragraph", "") + row = { + "run": i + 1, + "intent": r.get("intent"), + "elapsed_s": r["elapsed_s"], + "rerolls": m.get("rerolls"), + "n_attempts": m.get("n_attempts"), + "passed_count": len(passed), + "failed_count": len(failed), + "failed": ",".join(failed), + "passed": ",".join(passed), + "para_chars": len(para), + "paragraph": para.replace("\n", " "), + } + # Add per-attempt detail. + for a in r.get("attempts", []): + row[f"attempt{a.get('attempt')}_failed"] = ",".join(a.get("failed", [])) + rows.append(row) + atts = r.get("attempts", []) + att_summary = " | ".join( + f"#{a.get('attempt')}={'✓' if not a.get('failed') else 'fail:'+','.join(a.get('failed', []))}" + for a in atts + ) or "no attempts" + print(f"[{i+1}/{args.runs}] {r['elapsed_s']:6.1f}s final={len(passed)}/4 attempts: {att_summary}") + + if rows: + with out.open("w", newline="") as f: + w = csv.DictWriter(f, fieldnames=list(rows[0].keys())) + w.writeheader() + w.writerows(rows) + print(f"\nWrote {out}") + print("Pass-rate distribution: " + + json.dumps({n: sum(1 for r in rows if r['passed_count'] == n) + for n in range(5)})) + # Show the failed paragraphs for inspection. 
+    for r in rows:
+        if r['failed_count']:
+            print(f"\n--- run {r['run']} failed [{r['failed']}] ---")
+            print(r['paragraph'][:600])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/probe_stones_fire.py b/scripts/probe_stones_fire.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff012a2dd3708b966d73a5c8ceceec1e10387286
--- /dev/null
+++ b/scripts/probe_stones_fire.py
@@ -0,0 +1,207 @@
+"""Probe the lablab UI: does every Stone fire on the canonical address,
+and is the dep-availability regression that the SAT_MAY_9 run hit
+(`RuntimeError: operator torchvision::nms does not exist` on the local
+fallback path; `deps unavailable on this deployment: terratorch
+(RuntimeError), peft` on TerraMind LULC + Buildings) gone?
+
+This consumes /api/agent/stream as a curl-style SSE client (no
+EventSource needed), attributes each step event to a Stone via the
+STEP_TO_STONE mirror below, and asserts:
+    1. All five Stones (Cornerstone, Keystone, Touchstone, Lodestone,
+       Capstone) emit at least one fired step
+    2. No step result mentions:
+        - "torchvision::nms"
+        - "deps unavailable on this deployment: terratorch"
+        - "peft (RuntimeError)"
+    3. The final emissions ledger is non-empty (n_calls > 0) and, when
+       hardware is attributed, includes nvidia_l4
+
+Usage:
+    PYTHONPATH=. uv run python scripts/probe_stones_fire.py
+    PYTHONPATH=. uv run python scripts/probe_stones_fire.py \\
+        --base http://127.0.0.1:8000 \\
+        --query "Carleton Manor Houses, Queens"
+
+Exit 0 on success, 1 on any failure. Prints a per-Stone summary.
+"""
+from __future__ import annotations
+
+import argparse
+import json
+import sys
+import time
+from urllib.parse import quote
+
+import httpx
+
+DEFAULT_BASE = "https://lablab-ai-amd-developer-hackathon-riprap-nyc.hf.space"
+DEFAULT_QUERY = "80 Pioneer Street, Brooklyn"
+
+EXPECTED_STONES = {"Cornerstone", "Keystone", "Touchstone",
+                   "Lodestone", "Capstone"}
+
+# Step name → Stone, mirrored from web/main.py:_STEP_TO_STONE so this
+# script can be run without importing the app package. 
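+# (Kept in lockstep by hand: a step added in web/main.py but not here
+# silently drops out of the tallies below.)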
+STEP_TO_STONE: dict[str, str] = { + "sandy_inundation": "Cornerstone", + "dep_stormwater": "Cornerstone", + "ida_hwm_2021": "Cornerstone", + "prithvi_eo_v2": "Cornerstone", + "microtopo_lidar": "Cornerstone", + "sandy_nta": "Cornerstone", + "dep_extreme_2080_nta": "Cornerstone", + "dep_moderate_2050_nta": "Cornerstone", + "dep_moderate_current_nta": "Cornerstone", + "microtopo_nta": "Cornerstone", + "mta_entrance_exposure": "Keystone", + "nycha_development_exposure": "Keystone", + "doe_school_exposure": "Keystone", + "doh_hospital_exposure": "Keystone", + "terramind_synthesis": "Keystone", + "eo_chip_fetch": "Keystone", + "terramind_buildings": "Keystone", + "floodnet": "Touchstone", + "nyc311": "Touchstone", + "nws_obs": "Touchstone", + "noaa_tides": "Touchstone", + "prithvi_eo_live": "Touchstone", + "terramind_lulc": "Touchstone", + "nyc311_nta": "Touchstone", + "nws_alerts": "Lodestone", + "ttm_forecast": "Lodestone", + "ttm_311_forecast": "Lodestone", + "floodnet_forecast": "Lodestone", + "ttm_battery_surge": "Lodestone", + "reconcile_granite41": "Capstone", + "mellea_reconcile_address": "Capstone", + "reconcile_neighborhood": "Capstone", + "reconcile_development": "Capstone", + "reconcile_live_now": "Capstone", +} + +DEP_REGRESSION_PATTERNS = [ + "torchvision::nms", + "deps unavailable on this deployment: terratorch", + "peft (RuntimeError)", +] + + +def stream_events(base: str, q: str, timeout_s: float = 360.0): + """Yield (event, data_dict) for each SSE record.""" + url = f"{base.rstrip('/')}/api/agent/stream?q={quote(q)}" + with httpx.Client(timeout=timeout_s) as client: + with client.stream("GET", url) as r: + r.raise_for_status() + event = None + for line in r.iter_lines(): + if not line: + event = None + continue + if line.startswith("event:"): + event = line.removeprefix("event:").strip() + elif line.startswith("data:") and event: + body = line.removeprefix("data:").strip() + try: + yield event, json.loads(body) + except Exception: + yield event, {"_raw": body} + + +def main() -> int: + p = argparse.ArgumentParser() + p.add_argument("--base", default=DEFAULT_BASE) + p.add_argument("--query", default=DEFAULT_QUERY) + p.add_argument("--timeout", type=float, default=360.0) + args = p.parse_args() + + print(f"== probe_stones_fire ==") + print(f" base : {args.base}") + print(f" query: {args.query}\n") + + t0 = time.time() + fired: dict[str, list[dict]] = {s: [] for s in EXPECTED_STONES} + errored: list[dict] = [] + dep_regressions: list[dict] = [] + final: dict | None = None + + for event, payload in stream_events(args.base, args.query, args.timeout): + if event == "step": + step = payload.get("step", "") + ok = bool(payload.get("ok")) + stone = STEP_TO_STONE.get(step) + if stone: + if ok: + fired[stone].append(payload) + else: + errored.append(payload) + # Check the result + err strings against regression patterns. 
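+            # Serialising the whole payload catches markers buried in
+            # nested result fields, not just the top-level `err`.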
+            blob = json.dumps(payload, default=str).lower()
+            for pat in DEP_REGRESSION_PATTERNS:
+                if pat.lower() in blob:
+                    dep_regressions.append({"pattern": pat,
+                                            "step": step,
+                                            "payload": payload})
+                    break
+        elif event == "final":
+            final = payload
+
+    elapsed = time.time() - t0
+
+    # ---- assertions
+    failures: list[str] = []
+
+    missing_stones = [s for s in EXPECTED_STONES if not fired[s]]
+    if missing_stones:
+        failures.append(f"Stones with no fired step: {missing_stones}")
+
+    if dep_regressions:
+        for d in dep_regressions[:10]:
+            failures.append(
+                f"dep regression in step '{d['step']}': matched '{d['pattern']}'"
+            )
+
+    if final is None:
+        failures.append("no `final` event received")
+    else:
+        em = final.get("emissions") or {}
+        n_calls = em.get("n_calls", 0)
+        if n_calls == 0:
+            failures.append("emissions ledger is empty (n_calls=0)")
+        hw_keys = list((em.get("by_hardware") or {}).keys())
+        if hw_keys and "nvidia_l4" not in hw_keys:
+            failures.append(f"expected nvidia_l4 in emissions; got {hw_keys}")
+
+    # ---- print summary
+    print(f"-- step events --")
+    for s in ("Cornerstone", "Keystone", "Touchstone", "Lodestone", "Capstone"):
+        steps = [p.get("step") for p in fired[s]]
+        print(f"  {s:11s} fired={len(fired[s]):2d}  {steps}")
+    if errored:
+        print(f"\n-- {len(errored)} step events with ok=False --")
+        for p in errored[:8]:
+            err = (p.get("err") or
+                   (p.get("result") or {}).get("err") or
+                   (p.get("result") or {}).get("skipped") or "?")
+            print(f"  {p.get('step'):28s} {err[:140]}")
+
+    if final and (em := final.get("emissions")):
+        print(f"\n-- emissions --")
+        print(f"  n_calls      = {em.get('n_calls')}")
+        print(f"  n_measured   = {em.get('n_measured')}")
+        print(f"  total_wh     = {em.get('total_wh')}")
+        print(f"  total_joules = {em.get('total_joules')}")
+        print(f"  tokens.total = {(em.get('tokens') or {}).get('total')}")
+        print(f"  by_hardware  = {list((em.get('by_hardware') or {}).keys())}")
+
+    print(f"\nelapsed: {elapsed:.1f}s")
+
+    if failures:
+        print(f"\nFAIL ({len(failures)} issue{'s' if len(failures) != 1 else ''}):")
+        for f in failures:
+            print(f"  - {f}")
+        return 1
+    print("\nPASS — all 5 Stones fired, no torchvision/terratorch dep regression.")
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/scripts/redeploy.sh b/scripts/redeploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..89075b1eea378c765d7be7b6262c9372c080a3e4
--- /dev/null
+++ b/scripts/redeploy.sh
@@ -0,0 +1,130 @@
+#!/usr/bin/env bash
+# Full redeploy to an existing AMD MI300X droplet.
+#
+# 1. Generate a fresh bearer token
+# 2. scripts/deploy_droplet.sh (bring up vLLM + riprap-models)
+# 3. scripts/update_hf_env.sh (update HF Space vars + restart)
+# 4. .venv/bin/python scripts/probe_addresses.py (5/5 must pass)
+#
+# Usage: scripts/redeploy.sh <droplet-ip>
+#
+# Requires:
+#   HF auth — either `huggingface-cli login` (preferred) or HF_TOKEN env var
+#   .venv Python virtual environment with probe_addresses.py deps
+#   SSH access to the droplet (ssh-agent or SSH_KEY env var)
+#
+# Exit codes:
+#   0  all three steps passed
+#   1  deploy_droplet.sh failed (HF Space NOT touched)
+#   1  update_hf_env.sh failed (droplet is up but HF Space NOT updated)
+#   1  probe_addresses.py failed (deploy + HF update succeeded; not rolled back)
+set -euo pipefail
+
+if [ "$#" -ne 1 ]; then
+    echo "Usage: $0 <droplet-ip>" >&2
+    exit 1
+fi
+
+IP="$1"
+
+# Verify HF auth is available before doing the long droplet build.
+# Either HF_TOKEN env or a cached CLI login works — HfApi() picks up
+# whichever is set.
+if ! 
python3 -c " +import sys +from huggingface_hub import HfApi +try: + HfApi().whoami() +except Exception as e: + print(f'HF auth check failed: {e}', file=sys.stderr) + print('Run: huggingface-cli login (or: export HF_TOKEN=...)', + file=sys.stderr) + sys.exit(1) +" >/dev/null; then + exit 1 +fi + +REPO_ROOT="$(cd "$(dirname "$0")/.." && pwd)" +START_SECONDS=$SECONDS + +DEPLOY_STATUS="FAIL" +HF_STATUS="FAIL" +PROBE_STATUS="FAIL" + +# ---- 1. Generate a fresh bearer token ------------------------------------ +# openssl rand -base64 24 produces 32 chars; strip +/= to keep URL-safe. +TOKEN=$(openssl rand -base64 24 | tr -d '/+=') +echo "==> Deploying to ${IP} with fresh token..." +echo + +# ---- 2. deploy_droplet.sh ------------------------------------------------ +if bash "${REPO_ROOT}/scripts/deploy_droplet.sh" "$IP" "$TOKEN"; then + DEPLOY_STATUS="PASS" +else + echo "deploy_droplet.sh failed" >&2 + # Print summary before exiting so the caller sees partial state. + ELAPSED=$(( SECONDS - START_SECONDS )) + echo + echo "=== redeploy summary ===" + echo "Droplet IP : ${IP}" + echo "Token : (not set — deploy failed before token was registered)" + echo "Deploy : ${DEPLOY_STATUS}" + echo "HF Space : ${HF_STATUS}" + echo "E2E probe : ${PROBE_STATUS}" + printf "Total time : %dm%02ds\n" $(( ELAPSED / 60 )) $(( ELAPSED % 60 )) + exit 1 +fi + +echo +echo "==> Deploy succeeded. Updating HF Space..." +echo + +# ---- 3. update_hf_env.sh ------------------------------------------------- +if bash "${REPO_ROOT}/scripts/update_hf_env.sh" "$IP" "$TOKEN"; then + HF_STATUS="PASS" +else + echo "update_hf_env.sh failed. HF Space NOT updated." >&2 + ELAPSED=$(( SECONDS - START_SECONDS )) + echo + echo "=== redeploy summary ===" + echo "Droplet IP : ${IP}" + echo "Token : (regenerated, see HF Space vars)" + echo "Deploy : ${DEPLOY_STATUS}" + echo "HF Space : ${HF_STATUS}" + echo "E2E probe : ${PROBE_STATUS}" + printf "Total time : %dm%02ds\n" $(( ELAPSED / 60 )) $(( ELAPSED % 60 )) + exit 1 +fi + +echo +echo "==> HF Space updated. Running end-to-end probe..." +echo + +# ---- 4. probe_addresses.py ----------------------------------------------- +# probe_addresses.py exits 0 only when 5/5 pass (from docs/DROPLET-RUNBOOK.md). +# Disable set -e for this step so we can capture the exit code and still +# print the summary. +set +e +"${REPO_ROOT}/.venv/bin/python" "${REPO_ROOT}/scripts/probe_addresses.py" +PROBE_EXIT=$? +set -e + +if [ "$PROBE_EXIT" -eq 0 ]; then + PROBE_STATUS="PASS" +else + PROBE_STATUS="FAIL" +fi + +# ---- 5. Summary ---------------------------------------------------------- +ELAPSED=$(( SECONDS - START_SECONDS )) +echo +echo "=== redeploy summary ===" +echo "Droplet IP : ${IP}" +echo "Token : (regenerated, see HF Space vars)" +echo "Deploy : ${DEPLOY_STATUS}" +echo "HF Space : ${HF_STATUS}" +echo "E2E probe : ${PROBE_STATUS}" +printf "Total time : %dm%02ds\n" $(( ELAPSED / 60 )) $(( ELAPSED % 60 )) + +# Exit 1 if probe failed; deploy + HF update already succeeded, not rolling back. +[ "$PROBE_STATUS" = "PASS" ] diff --git a/scripts/run_prithvi_flood.py b/scripts/run_prithvi_flood.py new file mode 100644 index 0000000000000000000000000000000000000000..ddafeb944de76c39b9fe35682373daba90dffca3 --- /dev/null +++ b/scripts/run_prithvi_flood.py @@ -0,0 +1,175 @@ +"""Run Prithvi-EO-2.0-300M-TL-Sen1Floods11 once on a low-cloud HLS scene +over NYC. Save the resulting water mask as a vectorized GeoJSON for use +as a Riprap flood-layer specialist. 
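+
+(Two MGRS tiles are staged and merged, see SCENES below, since no single
+HLS granule covers all five boroughs; features are reprojected to
+EPSG:4326 before the cross-tile merge.)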
+ +This script defers to IBM's official inference.py (downloaded from the +model repo) rather than reimplementing the inference loop — that file +knows about the temporal/location-coord embeddings, the per-window +albumentations stack, and the upernet decoder output shape, all of +which are easy to get wrong. + + python scripts/run_prithvi_flood.py +""" +from __future__ import annotations + +import importlib.util +import json +import sys +import warnings +from pathlib import Path + +warnings.filterwarnings("ignore") + +ROOT = Path(__file__).resolve().parent.parent +OUT_DIR = ROOT / "data" +OUT_DIR.mkdir(exist_ok=True, parents=True) + +# NYC needs two MGRS tiles to cover everything: +# T18TWL covers Manhattan, Bronx, western Brooklyn, Newark Bay +# T18TXK covers eastern Brooklyn, Queens, Far Rockaway, Jamaica Bay, Long Island Sound +SCENES = [ + ("HLS.S30.T18TWL.2024247T153941.v2.0", "2024-09-04"), # 1% cloud, central NYC + ("HLS.S30.T18TXK.2024252T153819.v2.0", "2024-09-08"), # 0% cloud, eastern NYC +] +SCENE_ID, SCENE_DATE = SCENES[0] # back-compat for legacy users +MODEL_REPO = "ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL-Sen1Floods11" +PRITHVI_BAND_NAMES = ["B02", "B03", "B04", "B8A", "B11", "B12"] + + +def _stage_stack(out_path: Path, scene_id: str = SCENE_ID) -> bool: + if out_path.exists(): + return True + import numpy as np + import planetary_computer + import pystac_client + import rasterio + print(f"fetching scene {scene_id}...", file=sys.stderr) + catalog = pystac_client.Client.open( + "https://planetarycomputer.microsoft.com/api/stac/v1", + modifier=planetary_computer.sign_inplace, + ) + item = catalog.get_collection("hls2-s30").get_item(scene_id) + if item is None: + print(" scene not retrievable", file=sys.stderr) + return False + arrays = []; profile = None + for band in PRITHVI_BAND_NAMES: + with rasterio.open(item.assets[band].href) as ds: + arrays.append(ds.read(1)) + if profile is None: + profile = ds.profile.copy() + stack = np.stack(arrays, axis=0).astype("float32") + # Replace nodata -9999 with the inference.py NO_DATA_FLOAT sentinel (0.0001). + # inference.py only treats nodata correctly when explicit mean/std are + # configured — for this Sen1Floods11 fine-tune mean/std are None, so we + # do the substitution upstream and write a clean float32 raster in 0..1 + # reflectance units (constant_scale=0.0001 in config => DN/10000). + stack[stack <= -9000] = 0.0 + stack = stack / 10000.0 + stack = np.clip(stack, 0.0, 1.0).astype("float32") + profile.update(count=6, dtype="float32", + compress="DEFLATE", tiled=True, + blockxsize=256, blockysize=256, nodata=0.0) + with rasterio.open(out_path, "w", **profile) as ds: + for i in range(6): + ds.write(stack[i], i + 1) + print(f" wrote {out_path} ({out_path.stat().st_size // (1024*1024)} MB) " + f"(reflectance units, nodata→0)", file=sys.stderr) + return True + + +def _process_one(scene_id: str, scene_date: str) -> list[dict]: + """Stage one MGRS tile, run Prithvi, vectorise to features. 
Returns + a list of GeoJSON Features in EPSG:4326 (so they can be merged across + tiles in different UTM zones).""" + stack_path = OUT_DIR / f"hls_stack_{scene_date}.tif" + if not _stage_stack(stack_path, scene_id=scene_id): + return [] + + from huggingface_hub import hf_hub_download + inf_py = hf_hub_download(MODEL_REPO, "inference.py") + cfg = hf_hub_download(MODEL_REPO, "config.yaml") + ckpt = hf_hub_download(MODEL_REPO, "Prithvi-EO-V2-300M-TL-Sen1Floods11.pt") + + spec = importlib.util.spec_from_file_location("prithvi_inf", inf_py) + pm = importlib.util.module_from_spec(spec) + spec.loader.exec_module(pm) + + out_dir = OUT_DIR / "prithvi_runs" + out_dir.mkdir(exist_ok=True) + + pred_path = out_dir / f"pred_{stack_path.stem}.tiff" + if not pred_path.exists(): + print(f"running Prithvi on {scene_id}...", file=sys.stderr) + pm.main(data_file=str(stack_path), config=cfg, checkpoint=ckpt, + output_dir=str(out_dir), rgb_outputs=False, input_indices=None) + else: + print(f" reusing existing pred: {pred_path}", file=sys.stderr) + + if not pred_path.exists(): + cands = list(out_dir.glob(f"pred_{stack_path.stem}*")) + pred_path = cands[0] if cands else None + if pred_path is None or not pred_path.exists(): + print(f" no prediction tiff for {scene_id}", file=sys.stderr) + return [] + + import geopandas as gpd + import rasterio + from rasterio.features import shapes + from shapely.geometry import mapping, shape + + with rasterio.open(pred_path) as ds: + pred = ds.read(1); transform = ds.transform; src_crs = ds.crs + + water_mask = pred == 255 + n_water = int(water_mask.sum()) + print(f" {scene_id}: {n_water} water px " + f"({100*n_water/pred.size:.2f}%)", file=sys.stderr) + + feats = [] + for geom, val in shapes(water_mask.astype("uint8"), + mask=water_mask, transform=transform): + if val == 1: + poly = shape(geom) + if poly.area > 0: + feats.append({"type": "Feature", + "geometry": mapping(poly), + "properties": {"class": "water", + "scene_id": scene_id, + "scene_date": scene_date}}) + + if not feats: + return [] + + # Reproject to EPSG:4326 for cross-tile merging + g = gpd.GeoDataFrame.from_features(feats, crs=src_crs) + g = g.to_crs("EPSG:4326") + return json.loads(g.to_json())["features"] + + +def main() -> int: + out_geojson = OUT_DIR / "prithvi_flood_nyc.geojson" + if out_geojson.exists(): + print(f"already exists: {out_geojson}", file=sys.stderr) + return 0 + + all_features = [] + scene_ids = []; scene_dates = [] + for scene_id, scene_date in SCENES: + feats = _process_one(scene_id, scene_date) + all_features.extend(feats) + if feats: + scene_ids.append(scene_id); scene_dates.append(scene_date) + + out = {"type": "FeatureCollection", "features": all_features, + "scene_ids": scene_ids, "scene_dates": scene_dates, + "model": MODEL_REPO, "crs": "EPSG:4326"} + out_geojson.write_text(json.dumps(out)) + print(f"\nwrote {len(all_features)} water polygons across " + f"{len(scene_ids)} scenes -> {out_geojson} " + f"({out_geojson.stat().st_size // 1024} KB)", file=sys.stderr) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/run_prithvi_ida.py b/scripts/run_prithvi_ida.py new file mode 100644 index 0000000000000000000000000000000000000000..dc15261088a9a3beadd016127cf40dc5a66596b5 --- /dev/null +++ b/scripts/run_prithvi_ida.py @@ -0,0 +1,216 @@ +"""Run Prithvi-EO 2.0 (Sen1Floods11) on a real Hurricane Ida pre/post pair. 
+ +Pre-event: HLS.S30.T18TWK.2021237T153809.v2.0 (2021-08-25, 3% cloud) +Post-event: HLS.S30.T18TWK.2021245T154911.v2.0 (2021-09-02, 1% cloud, + ~12h after peak rainfall) + +This is the genuinely-defensible Prithvi run for the demo: a real flood +event, two clean scenes within the model's optical comfort zone, with a +diff that isolates *new* surface water attributable to Ida from the +permanent rivers/harbor that are present in both scenes. + +Honest framing baked into the metadata: +- The model still misses subway and basement flooding (sub-surface; the + dominant Ida damage mode in NYC). Optical satellite cannot see those. +- By 16:02 UTC Sep 2 (~12 h post-peak), pluvial street water had largely + drained. The diff signal is mostly: Jamaica Bay marsh ponding, + riverside spillover, low-lying park inundation. +- This is what an Apache-2.0 foundation model can defensibly contribute + to a flood-event assessment, and we say so in the report. + + python scripts/run_prithvi_ida.py +""" +from __future__ import annotations + +import importlib.util +import json +import sys +import warnings +from pathlib import Path + +warnings.filterwarnings("ignore") + +ROOT = Path(__file__).resolve().parent.parent +OUT_DIR = ROOT / "data" +OUT_DIR.mkdir(exist_ok=True, parents=True) + +PRE_SCENE = "HLS.S30.T18TWK.2021237T153809.v2.0" +POST_SCENE = "HLS.S30.T18TWK.2021245T154911.v2.0" +PRE_DATE = "2021-08-25" +POST_DATE = "2021-09-02" +EVENT = "Hurricane Ida" + +MODEL_REPO = "ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL-Sen1Floods11" +PRITHVI_BAND_NAMES = ["B02", "B03", "B04", "B8A", "B11", "B12"] + + +def _stage_stack(out_path: Path, scene_id: str) -> bool: + if out_path.exists(): + print(f" reusing {out_path.name}", file=sys.stderr) + return True + import numpy as np + import planetary_computer + import pystac_client + import rasterio + print(f"fetching {scene_id}...", file=sys.stderr) + catalog = pystac_client.Client.open( + "https://planetarycomputer.microsoft.com/api/stac/v1", + modifier=planetary_computer.sign_inplace, + ) + item = catalog.get_collection("hls2-s30").get_item(scene_id) + if item is None: + print(f" {scene_id} not retrievable", file=sys.stderr) + return False + arrays = [] + profile = None + for band in PRITHVI_BAND_NAMES: + with rasterio.open(item.assets[band].href) as ds: + arrays.append(ds.read(1)) + if profile is None: + profile = ds.profile.copy() + stack = np.stack(arrays, axis=0).astype("float32") + stack[stack <= -9000] = 0.0 + stack = stack / 10000.0 + stack = np.clip(stack, 0.0, 1.0).astype("float32") + profile.update(count=6, dtype="float32", + compress="DEFLATE", tiled=True, + blockxsize=256, blockysize=256, nodata=0.0) + with rasterio.open(out_path, "w", **profile) as ds: + for i in range(6): + ds.write(stack[i], i + 1) + print(f" wrote {out_path.name} ({out_path.stat().st_size // (1024*1024)} MB)", + file=sys.stderr) + return True + + +def _run_prithvi(stack_path: Path, out_dir: Path) -> Path | None: + """Run inference if needed; return path to pred .tiff.""" + pred_path = out_dir / f"pred_{stack_path.stem}.tiff" + if pred_path.exists(): + print(f" reusing existing pred: {pred_path.name}", file=sys.stderr) + return pred_path + + from huggingface_hub import hf_hub_download + inf_py = hf_hub_download(MODEL_REPO, "inference.py") + cfg = hf_hub_download(MODEL_REPO, "config.yaml") + ckpt = hf_hub_download(MODEL_REPO, "Prithvi-EO-V2-300M-TL-Sen1Floods11.pt") + + spec = importlib.util.spec_from_file_location("prithvi_inf", inf_py) + pm = importlib.util.module_from_spec(spec) + 
spec.loader.exec_module(pm) + + print(f" running Prithvi on {stack_path.name}...", file=sys.stderr) + pm.main(data_file=str(stack_path), config=cfg, checkpoint=ckpt, + output_dir=str(out_dir), rgb_outputs=False, input_indices=None) + if pred_path.exists(): + return pred_path + cands = list(out_dir.glob(f"pred_{stack_path.stem}*")) + return cands[0] if cands else None + + +def main() -> int: + out_geojson = OUT_DIR / "prithvi_ida_2021.geojson" + if out_geojson.exists(): + print(f"already exists: {out_geojson}", file=sys.stderr) + return 0 + + pre_stack = OUT_DIR / f"hls_stack_pre_ida_{PRE_DATE}.tif" + post_stack = OUT_DIR / f"hls_stack_post_ida_{POST_DATE}.tif" + if not (_stage_stack(pre_stack, PRE_SCENE) and + _stage_stack(post_stack, POST_SCENE)): + return 1 + + out_dir = OUT_DIR / "prithvi_runs" + out_dir.mkdir(exist_ok=True) + pre_pred = _run_prithvi(pre_stack, out_dir) + post_pred = _run_prithvi(post_stack, out_dir) + if pre_pred is None or post_pred is None: + print("inference failed", file=sys.stderr) + return 2 + + # ---- diff: NEW water in post that wasn't in pre = Ida-attributable ---- + import geopandas as gpd + import rasterio + from rasterio.features import shapes + from shapely.geometry import mapping, shape + + with rasterio.open(pre_pred) as ds: + pre = ds.read(1) + with rasterio.open(post_pred) as ds: + post = ds.read(1) + transform = ds.transform + crs = ds.crs + + # The model emits 0 / 255. New-water = post(255) AND pre(!=255) + new_water = (post == 255) & (pre != 255) + n_new = int(new_water.sum()) + n_pre = int((pre == 255).sum()) + n_post = int((post == 255).sum()) + print(f" pre water px: {n_pre:>8d} ({100*n_pre/pre.size:.2f}%)", file=sys.stderr) + print(f" post water px: {n_post:>8d} ({100*n_post/post.size:.2f}%)", file=sys.stderr) + print(f" NEW water px: {n_new:>8d} ({100*n_new/post.size:.2f}%)", file=sys.stderr) + + # also save the post mask for "all post-event water" if useful + post_water = post == 255 + + # vectorize NEW water (Ida-attributable inundation) + feats_new = [] + for geom, val in shapes(new_water.astype("uint8"), + mask=new_water, transform=transform): + if val == 1: + poly = shape(geom) + if poly.area > 0: + feats_new.append({"type": "Feature", + "geometry": mapping(poly), + "properties": {"class": "new_water_post_ida"}}) + + # vectorize ALL post-event water (for legend / context) + feats_post = [] + for geom, val in shapes(post_water.astype("uint8"), + mask=post_water, transform=transform): + if val == 1: + poly = shape(geom) + if poly.area > 0: + feats_post.append({"type": "Feature", + "geometry": mapping(poly), + "properties": {"class": "post_event_water"}}) + + g_new = gpd.GeoDataFrame.from_features(feats_new, crs=crs).to_crs("EPSG:4326") \ + if feats_new else gpd.GeoDataFrame(geometry=[], crs="EPSG:4326") + g_post = gpd.GeoDataFrame.from_features(feats_post, crs=crs).to_crs("EPSG:4326") \ + if feats_post else gpd.GeoDataFrame(geometry=[], crs="EPSG:4326") + + new_features = json.loads(g_new.to_json())["features"] + post_features = json.loads(g_post.to_json())["features"] + + out = { + "type": "FeatureCollection", + "features": new_features, + "_post_event_water_features": post_features, # carried for reference + "event": EVENT, + "pre_scene_id": PRE_SCENE, "pre_scene_date": PRE_DATE, + "post_scene_id": POST_SCENE, "post_scene_date": POST_DATE, + "model": MODEL_REPO, + "crs": "EPSG:4326", + "interpretation": ( + "Polygons in `features` are pixels classified as water in the " + "post-event scene but NOT in the pre-event scene — i.e., " + 
"candidate Hurricane Ida-attributable inundation. The Sep 2 " + "Sentinel-2 pass was ~12 h after peak rainfall; pluvial street " + "and basement flooding (the dominant Ida damage mode in NYC) " + "had largely drained by then, so this signal mostly captures " + "marsh ponding, riverside spillover, and low-lying park water. " + "Subway and basement flooding are not surface-visible to " + "optical satellites." + ), + } + out_geojson.write_text(json.dumps(out)) + print(f"\nwrote {len(new_features)} new-water polygons + " + f"{len(post_features)} post-event water polygons " + f"-> {out_geojson} ({out_geojson.stat().st_size // 1024} KB)", + file=sys.stderr) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/save_droplet_image.sh b/scripts/save_droplet_image.sh new file mode 100755 index 0000000000000000000000000000000000000000..7c90ad7e8cef25acd53f8e93cc18ba54a164a122 --- /dev/null +++ b/scripts/save_droplet_image.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# Riprap droplet image fallback — save the bootstrap droplet's +# riprap-models image to a portable tarball. +# +# Use this when the public-base Dockerfile (services/riprap-models/Dockerfile) +# can't be reproduced — e.g. when AMD's public ROCm release diverges from +# the bootstrap droplet's ABI in a way that breaks our specialists. The +# saved tarball is byte-identical to the running container's image, so +# `docker load` on a fresh droplet gives you the exact env that worked. +# +# Caveats: +# - Tarballs are huge (~15 GB compressed, ~35 GB raw) +# - Needs the droplet to still be alive (run BEFORE you destroy it) +# - Can't be uploaded to a public registry without redacting any +# embedded auth tokens; recommend you scp it to backup storage +# +# Usage: +# scripts/save_droplet_image.sh [out-dir] +# +# Default out-dir: ~/riprap-backups/ +set -euo pipefail + +if [ "$#" -lt 1 ]; then + echo "Usage: $0 [out-dir]" >&2 + exit 64 +fi + +DROPLET_IP="$1" +OUT_DIR="${2:-$HOME/riprap-backups}" +SSH_USER="${SSH_USER:-root}" +SSH_KEY_FLAG="" +if [ -n "${SSH_KEY:-}" ]; then SSH_KEY_FLAG="-i $SSH_KEY"; fi +SSH="ssh $SSH_KEY_FLAG -o StrictHostKeyChecking=accept-new ${SSH_USER}@${DROPLET_IP}" +SCP="scp $SSH_KEY_FLAG -o StrictHostKeyChecking=accept-new" + +mkdir -p "$OUT_DIR" +STAMP=$(date -u +%Y%m%d-%H%M%S) +TAR="$OUT_DIR/riprap-droplet-base-${STAMP}.tar" + +echo "==> 1. Commit running terramind container as image" +$SSH 'docker commit terramind riprap-droplet-base:latest' + +echo "==> 2. Save image to droplet-local tarball" +$SSH "docker save riprap-droplet-base:latest -o /workspace/riprap-droplet-base.tar" +SIZE=$($SSH 'stat -c %s /workspace/riprap-droplet-base.tar') +echo " droplet-local tarball: $SIZE bytes" + +echo "==> 3. Compress (zstd preferred; gzip fallback)" +if $SSH 'command -v zstd > /dev/null'; then + $SSH 'zstd -3 --rm /workspace/riprap-droplet-base.tar' + REMOTE="/workspace/riprap-droplet-base.tar.zst" +else + $SSH 'gzip /workspace/riprap-droplet-base.tar' + REMOTE="/workspace/riprap-droplet-base.tar.gz" +fi +CSIZE=$($SSH "stat -c %s $REMOTE") +echo " compressed: $CSIZE bytes ($(awk "BEGIN { printf \"%.1f\", $CSIZE/$SIZE*100 }")% of raw)" + +echo "==> 4. scp to local: $TAR$(basename $REMOTE | sed 's|riprap-droplet-base.tar||')" +$SCP "${SSH_USER}@${DROPLET_IP}:${REMOTE}" "${TAR}$(echo $REMOTE | sed 's|.*\.tar||')" + +echo "==> 5. 
Cleanup droplet tarball"
+$SSH "rm -f $REMOTE"
+
+ls -lh "${TAR}"*
+echo
+echo "Restore on a fresh droplet:"
+echo "  scp ${TAR}* root@<new-ip>:/workspace/"
+echo "  ssh root@<new-ip> 'zstd -d /workspace/riprap-droplet-base.tar.zst -o /tmp/img.tar && docker load -i /tmp/img.tar'"
+echo "  Then docker run with the original device flags (see CLAUDE.md)."
diff --git a/scripts/smoke_test_gpu.sh b/scripts/smoke_test_gpu.sh
new file mode 100755
index 0000000000000000000000000000000000000000..fe4ec22aaaa886dc6733a86a3f9cf55a02468dd8
--- /dev/null
+++ b/scripts/smoke_test_gpu.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+# Smoke test the AMD GPU droplet (vLLM + riprap-models).
+# Usage: bash scripts/smoke_test_gpu.sh <ip> <token>
+set -euo pipefail
+
+IP="${1:?Usage: smoke_test_gpu.sh <ip> <token>}"
+TOKEN="${2:?Usage: smoke_test_gpu.sh <ip> <token>}"
+VLLM_URL="http://${IP}:8001"
+ML_URL="http://${IP}:7860"
+
+PASS=0
+FAIL=0
+
+check() {
+  local label="$1"; shift
+  local status
+  if status=$(eval "$@" 2>&1); then
+    echo "  PASS $label"
+    PASS=$((PASS+1))
+  else
+    echo "  FAIL $label"
+    echo "       $status"
+    FAIL=$((FAIL+1))
+  fi
+}
+
+echo "=== Smoke test: $IP ==="
+echo ""
+
+echo "--- vLLM (port 8001) ---"
+check "vLLM /v1/models" \
+  "curl -sf -H 'Authorization: Bearer $TOKEN' $VLLM_URL/v1/models | python3 -c 'import sys,json; d=json.load(sys.stdin); assert len(d[\"data\"]) > 0'"
+
+check "vLLM /v1/chat/completions" \
+  "curl -sf -H 'Authorization: Bearer $TOKEN' -H 'Content-Type: application/json' \
+     -d '{\"model\":\"granite-4.1-8b\",\"messages\":[{\"role\":\"user\",\"content\":\"ping\"}],\"max_tokens\":5}' \
+     $VLLM_URL/v1/chat/completions | python3 -c 'import sys,json; d=json.load(sys.stdin); assert d[\"choices\"][0][\"message\"][\"content\"]'"
+
+echo ""
+echo "--- riprap-models (port 7860) ---"
+check "riprap-models /healthz" \
+  "curl -sf $ML_URL/healthz | python3 -c 'import sys,json; d=json.load(sys.stdin); assert d.get(\"ok\") == True'"
+
+check "riprap-models /v1/granite-embed" \
+  "curl -sf -H 'Authorization: Bearer $TOKEN' -H 'Content-Type: application/json' \
+     -d '{\"texts\":[\"flood risk in NYC\"]}' \
+     $ML_URL/v1/granite-embed | python3 -c 'import sys,json; d=json.load(sys.stdin); assert d.get(\"ok\") and len(d[\"vectors\"]) == 1 and len(d[\"vectors\"][0]) > 0'"
+
+check "riprap-models /v1/gliner-extract" \
+  "curl -sf -H 'Authorization: Bearer $TOKEN' -H 'Content-Type: application/json' \
+     -d '{\"text\":\"Hurricane Sandy flooded 80 Pioneer Street in Red Hook Brooklyn.\",\"labels\":[\"location\",\"event\"]}' \
+     $ML_URL/v1/gliner-extract | python3 -c 'import sys,json; d=json.load(sys.stdin); assert \"entities\" in d'"
+
+echo ""
+echo "=== Results: ${PASS} PASS, ${FAIL} FAIL ==="
+[ "$FAIL" -eq 0 ]
diff --git a/scripts/update_hf_env.sh b/scripts/update_hf_env.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6335ed951a276cf1c6a9867e6573f8bc05732596
--- /dev/null
+++ b/scripts/update_hf_env.sh
@@ -0,0 +1,121 @@
+#!/usr/bin/env bash
+# Update HF Space env vars to point at a new droplet, restart the Space,
+# and poll until the agent endpoint returns HTTP 200.
+#
+# Usage: scripts/update_hf_env.sh <droplet-ip> <token>
+#
+# Requires:
+#   huggingface_hub >= 0.36 installed (provides the Python API used below;
+#   note: 'huggingface-cli space variables' does not exist in this version)
+#   Either:
+#     - `huggingface-cli login` cached token (preferred), OR
+#     - HF_TOKEN env var
+#   HfApi() picks up the cached login automatically; HF_TOKEN overrides.
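+#
+# Example invocation (illustrative values — substitute your droplet's
+# public IP and the bearer token you generated):
+#   HF_TOKEN=hf_xxx scripts/update_hf_env.sh 203.0.113.10 "$TOKEN"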
+#
+# Space slug: lablab-ai-amd-developer-hackathon/riprap-nyc
+# Variables set (from docs/DROPLET-RUNBOOK.md §Required secrets):
+#   RIPRAP_LLM_PRIMARY      vllm
+#   RIPRAP_LLM_BASE_URL     http://<droplet-ip>:8001/v1
+#   RIPRAP_LLM_API_KEY      <token>
+#   RIPRAP_ML_BACKEND       remote
+#   RIPRAP_ML_BASE_URL      http://<droplet-ip>:7860
+#   RIPRAP_ML_API_KEY       <token>
+#   RIPRAP_NYCHA_REGISTERS  1
+set -euo pipefail
+
+if [ "$#" -ne 2 ]; then
+  echo "Usage: $0 <droplet-ip> <token>" >&2
+  exit 1
+fi
+
+IP="$1"
+TOKEN="$2"
+
+SPACE_ID="lablab-ai-amd-developer-hackathon/riprap-nyc"
+SPACE_URL="https://lablab-ai-amd-developer-hackathon-riprap-nyc.hf.space"
+VLLM_PORT=8001
+MODELS_PORT=7860
+
+echo "==> Updating HF Space variables"
+echo "    space:       ${SPACE_ID}"
+echo "    droplet ip:  ${IP}"
+echo "    vLLM port:   ${VLLM_PORT}"
+echo "    models port: ${MODELS_PORT}"
+echo
+
+# ---- 1. Set all seven Space variables via the huggingface_hub Python API --
+# huggingface-cli space variables does not exist in huggingface_hub 0.36.x;
+# add_space_variable is the documented programmatic interface.
+python3 -c "
+import sys, os
+try:
+    from huggingface_hub import HfApi
+except ImportError:
+    print('Error: huggingface_hub not installed', file=sys.stderr)
+    sys.exit(1)
+
+api = HfApi(token=os.environ.get('HF_TOKEN'))  # None → cached CLI login
+space_id = '${SPACE_ID}'
+ip = '${IP}'
+token = '${TOKEN}'
+vllm_port = ${VLLM_PORT}
+models_port = ${MODELS_PORT}
+
+variables = {
+    'RIPRAP_LLM_PRIMARY': 'vllm',
+    'RIPRAP_LLM_BASE_URL': f'http://{ip}:{vllm_port}/v1',
+    'RIPRAP_LLM_API_KEY': token,
+    'RIPRAP_ML_BACKEND': 'remote',
+    'RIPRAP_ML_BASE_URL': f'http://{ip}:{models_port}',
+    'RIPRAP_ML_API_KEY': token,
+    # Heavy register specialists (NYCHA / DOE schools / DOH hospitals).
+    # Pre-warmed at boot via web/main.py:_warm_caches when this is set;
+    # without it the FSM never adds these step functions, so the demo
+    # never sees register cards even when the underlying data is loaded.
+    'RIPRAP_NYCHA_REGISTERS': '1',
+}
+
+for key, value in variables.items():
+    display = '<redacted>' if 'KEY' in key else value
+    print(f'  setting {key} = {display}')
+    api.add_space_variable(repo_id=space_id, key=key, value=value)
+
+print(f'[python] all {len(variables)} variables set')
+"
+echo
+
+# ---- 2. Restart the Space ------------------------------------------------
+echo "==> Restarting HF Space"
+python3 -c "
+import os
+from huggingface_hub import HfApi
+api = HfApi(token=os.environ.get('HF_TOKEN'))  # None → cached CLI login
+rt = api.restart_space(repo_id='${SPACE_ID}')
+print(f'  stage after restart request: {rt.stage}')
+"
+echo
+
+# ---- 3. Poll /api/backend until HTTP 200 (max 120 s) ---------------------
+# /api/backend is documented in docs/DROPLET-RUNBOOK.md §Destroy checklist
+# as the endpoint to verify the Space is serving.
+echo "==> Polling ${SPACE_URL}/api/backend (up to 120 s)..."
+DEADLINE=$((SECONDS + 120))
+HEALTHY=0
+while (( SECONDS < DEADLINE )); do
+  HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" \
+    --max-time 10 "${SPACE_URL}/api/backend" 2>/dev/null || echo "000")
+  if [ "$HTTP_CODE" = "200" ]; then
+    HEALTHY=1
+    break
+  fi
+  echo "  (${HTTP_CODE}) not ready yet — waiting 10 s..."
+  sleep 10
+done
+
+if [ "$HEALTHY" -ne 1 ]; then
+  echo "HF Space did not become healthy within 120s" >&2
+  exit 1
+fi
+
+echo
+echo "HF Space updated and healthy.
IP=${IP}"
diff --git a/services/riprap-models/Dockerfile b/services/riprap-models/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..a67f9bc2833be2932cc451242eab6d0a6b50c832
--- /dev/null
+++ b/services/riprap-models/Dockerfile
@@ -0,0 +1,77 @@
+# Riprap Models — droplet inference service.
+#
+# Self-contained ROCm + PyTorch image that runs every GPU-accelerable
+# specialist Riprap consumes (Prithvi-NYC-Pluvial, TerraMind LULC +
+# Buildings, Granite TTM r2, Granite Embedding 278M, GLiNER).
+#
+# Base: the vLLM ROCm image pinned in the FROM below. It ships
+# torch 2.9.1+git8907517 (the bespoke MI300X build the bootstrap
+# droplet was hand-built with) and is already cached on DigitalOcean
+# AMD GPU droplets, so no download is needed during bring-up.
+#
+# Fallback base: AMD's public ROCm 7.2.3 + Python 3.12 + PyTorch 2.9.1
+# release image (rocm/pytorch). Same minor torch version, but pulled
+# from a public registry so any fresh droplet can recreate the env
+# without internal AMD wheels. The released 2.9.1 has the kernels we
+# need — none of riprap-models calls into vLLM-specific attention
+# paths, so the dev-build vs release-build delta is inconsequential
+# for our forward passes.
+#
+# Build:  docker build -t riprap-models:latest -f Dockerfile ../..
+# Layout: the build context is the project root so the COPY lines
+#         below can reach `services/riprap-models/`.
+FROM vllm/vllm-openai-rocm:v0.17.1
+
+ENV DEBIAN_FRONTEND=noninteractive \
+    PYTHONUNBUFFERED=1 \
+    PIP_NO_CACHE_DIR=1 \
+    PIP_DISABLE_PIP_VERSION_CHECK=1 \
+    HF_HOME=/root/.cache/huggingface \
+    TRANSFORMERS_CACHE=/root/.cache/huggingface \
+    # MI300X tuning the running container uses; baking them in so a
+    # bring-up doesn't require remembering the env-set incantation.
+    HIP_FORCE_DEV_KERNARG=1 \
+    HSA_NO_SCRATCH_RECLAIM=1 \
+    PYTORCH_ROCM_ARCH=gfx942
+
+# git is needed by some HF model-card downloads (terratorch yaml repos
+# pull via the git protocol). curl for healthcheck. libgl1 for
+# rasterio's Pillow path. The base ROCm image is Ubuntu 24.04, and
+# already includes most build-time deps we need.
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    curl git libgl1 libglib2.0-0 \
+    && rm -rf /var/lib/apt/lists/*
+
+WORKDIR /workspace/riprap-models
+
+# Install deps in two layers so a code-only change doesn't bust the
+# heavy ML wheel cache. requirements.txt holds runtime-narrow
+# packages that the service imports; requirements-full.txt is the
+# super-set the FSM specialists pull in transitively (terratorch's
+# kornia / albumentations chain, granite-tsfm's tsfm_public, etc.).
+COPY services/riprap-models/requirements-full.txt /tmp/req-full.txt
+RUN pip install --upgrade pip && \
+    # Freeze the ROCm torch/torchvision/torchaudio at whatever version
+    # the vLLM base image ships, so transitive deps (peft, torchgeo, etc.)
+    # don't pull a CUDA build from PyPI and replace the ROCm one.
+    pip list --format=freeze > /tmp/all-pkgs.txt && \
+    grep -E "^(torch|torchvision|torchaudio)==" /tmp/all-pkgs.txt > /tmp/torch-lock.txt && \
+    cat /tmp/torch-lock.txt && \
+    pip install -r /tmp/req-full.txt --constraint /tmp/torch-lock.txt
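+
+# For reference, /tmp/torch-lock.txt ends up holding pins shaped like
+# the following (illustrative — the exact torchvision/torchaudio
+# versions are whatever the base image ships):
+#   torch==2.9.1+git8907517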
+# Service code itself. Cheap to invalidate; lands last.
+COPY services/riprap-models/main.py /workspace/riprap-models/main.py
+COPY services/riprap-models/requirements.txt /workspace/riprap-models/requirements.txt
+
+EXPOSE 7860
+
+# Ensure we don't use the vLLM entrypoint.
+ENTRYPOINT []
+
+# `--proxy-headers` so a future LB sees the right client IP. The
+# /healthz route is unauthenticated by design (operators want
+# readiness probes to work without secrets); /v1/* requires the
+# bearer token via RIPRAP_MODELS_API_KEY.
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860", \
+     "--log-level", "info", "--proxy-headers"]
diff --git a/services/riprap-models/README.md b/services/riprap-models/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8a7552c1a6fa017ea143fbef1ad116ea93b2fe52
--- /dev/null
+++ b/services/riprap-models/README.md
@@ -0,0 +1,152 @@
+# Riprap Models: droplet inference service
+
+GPU inference microservice that runs alongside vLLM on the AMD MI300X
+droplet. Exposes one HTTP endpoint per model class consumed by the
+Riprap FastAPI app's probes, so all GPU-accelerable forward passes
+(Prithvi-NYC-Pluvial, TerraMind LULC + Buildings, Granite TTM r2,
+Granite Embedding 278M, GLiNER) run on the MI300X regardless of
+which surface (laptop or HF Space) hosts the FastAPI process.
+
+## Service contract
+
+| Method | Path | Purpose |
+|---|---|---|
+| GET | `/healthz` | reachability probe + which models are warm |
+| POST | `/v1/prithvi-pluvial` | Prithvi-NYC-Pluvial v2 segmentation |
+| POST | `/v1/terramind` | TerraMind LULC / Buildings / Synthesis (adapter-dispatched) |
+| POST | `/v1/ttm-forecast` | Granite TTM r2 (zero-shot Battery, fine-tune Battery, weekly 311, FloodNet recurrence) |
+| POST | `/v1/granite-embed` | Granite Embedding 278M batch encode |
+| POST | `/v1/gliner-extract` | GLiNER typed-entity extraction |
+
+Auth: bearer token on every `/v1/*` route via `RIPRAP_MODELS_API_KEY`.
+Same shape as vLLM. `/healthz` is open so liveness probes don't need
+auth.
+
+## Deploy: fresh droplet (recommended)
+
+Use the one-shot bring-up script. Works on any AMD ROCm GPU droplet
+with Docker + GPU device files (`/dev/kfd`, `/dev/dri`) and SSH root
+access. No prior container state required.
+
+```bash
+scripts/deploy_droplet.sh <droplet-ip> <token>
+```
+
+What it does, in order:
+
+1. Verifies SSH + AMD GPU device files on the droplet
+2. Pulls `vllm/vllm-openai-rocm:v0.17.1`
+3. Tar-streams `services/riprap-models/` to `/workspace/riprap-build`
+4. Builds `riprap-models:latest` from `services/riprap-models/Dockerfile`
+   (base: the `vllm/vllm-openai-rocm:v0.17.1` image pulled in step 2,
+   with AMD's public `rocm/pytorch` release image as the documented
+   fallback — ~10–20 min on first build, < 1 min on rebuild)
+5. Starts both containers (`vllm` on host port 8001, `riprap-models`
+   on host port 7860) with `--restart unless-stopped` so they survive
+   reboots
+6. Waits up to 90 s for vLLM `/v1/models` and 60 s for
+   riprap-models `/healthz`, exits non-zero if either misses
+
+Re-running on the same droplet is idempotent. Existing containers
+get `docker rm -f`'d and recreated.
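+
+If bring-up stalls, the two step-6 readiness checks can be run by
+hand — a minimal sketch, assuming the default ports and substituting
+your own droplet IP and token:
+
+```bash
+curl -sf -H "Authorization: Bearer $TOKEN" "http://<droplet-ip>:8001/v1/models"
+curl -sf "http://<droplet-ip>:7860/healthz"
+```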
+
+Env knobs:
+
+| Var | Default | Purpose |
+|---|---|---|
+| `SSH_USER` | `root` | SSH login |
+| `SSH_KEY` | (ssh-agent) | path to private key |
+| `VLLM_PORT` | `8001` | host port mapping for vLLM |
+| `MODELS_PORT` | `7860` | host port mapping for riprap-models |
+| `MODEL_REPO` | `ibm-granite/granite-4.1-8b` | LLM repo |
+| `HF_CACHE_HOST` | `/root/hf-cache` | HF cache mount on droplet |
+| `SKIP_BUILD` | `0` | set `1` to skip Dockerfile build |
+
+After it returns, set the printed env vars in your local shell or HF
+Space variables, run `scripts/probe_addresses.py` to verify, and
+you're live.
+
+## Deploy: extend an existing container (legacy)
+
+If you already have a `terramind` container with the heavy ML deps
+baked in (the bootstrap-droplet path), you can skip the Dockerfile
+build and install the runtime deltas only:
+
+```bash
+ssh root@<droplet-ip> 'mkdir -p /workspace/riprap-models'
+rsync -av --delete services/riprap-models/ root@<droplet-ip>:/workspace/riprap-models/
+ssh root@<droplet-ip> bash <<'REMOTE'
+docker cp /workspace/riprap-models terramind:/workspace/
+docker exec -d -e RIPRAP_MODELS_API_KEY="$TOKEN" terramind \
+  bash -c "cd /workspace/riprap-models && \
+           pip install --no-cache-dir -r requirements.txt && \
+           uvicorn main:app --host 0.0.0.0 --port 7860"
+REMOTE
+```
+
+This path uses `requirements.txt` (deltas only); the Dockerfile path
+above uses `requirements-full.txt` (everything). The service is
+externally reachable at `http://<droplet-ip>:7860`, provided the host
+port mapping was set when the container was created.
+
+## Destroy + redeploy runbook
+
+What survives a droplet destruction:
+
+- `services/riprap-models/Dockerfile` plus `requirements-full.txt`.
+  Every pinned dep, captured from the bootstrap droplet on
+  2026-05-05.
+- `scripts/deploy_droplet.sh`. The bring-up script.
+- HF Hub model artefacts. Every fine-tune lives at
+  `msradam/Prithvi-EO-2.0-NYC-Pluvial`,
+  `msradam/TerraMind-NYC-Adapters`,
+  `msradam/Granite-TTM-r2-Battery-Surge`. The service pulls them
+  fresh on first request.
+
+What does NOT survive:
+
+- The HF cache at `${HF_CACHE_HOST}` (default `/root/hf-cache`) on
+  the droplet. Every redeploy re-downloads all model weights, around
+  19 GB in total (Granite 4.1 8b for vLLM around 16 GB, Prithvi v2
+  around 1.3 GB, TerraMind adapters around 600 MB, Granite Embedding
+  around 600 MB, GLiNER around 400 MB, Granite TTM r2 around 6 MB).
+  First query after redeploy takes around 30 s longer than
+  steady-state because of the lazy model load.
+- The bearer token. Generate a fresh one when re-deploying.
+
+To redeploy:
+
+```bash
+# 1. Spin up a new GPU droplet (DigitalOcean / AMD Developer Cloud)
+# 2. Copy your SSH key to it (DO usually does this for you)
+# 3. Run:
+TOKEN=$(openssl rand -base64 24)
+scripts/deploy_droplet.sh <new-droplet-ip> "$TOKEN"
+
+# 4. Point the HF Space at the new droplet. This sets all seven Space
+#    variables, restarts the Space, and polls /api/backend until it
+#    returns 200. (Don't reach for `huggingface-cli space variables`;
+#    that subcommand does not exist in huggingface_hub 0.36.x.)
+scripts/update_hf_env.sh <new-droplet-ip> "$TOKEN"
+
+# 5.
Verify end-to-end against the redeployed stack
+.venv/bin/python scripts/probe_addresses.py \
+  --base https://lablab-ai-amd-developer-hackathon-riprap-nyc.hf.space
+```
+
+## Local app config
+
+Set in either env or HF Space variables:
+
+```
+RIPRAP_ML_BACKEND  = remote
+RIPRAP_ML_BASE_URL = http://129.212.181.238:7860
+RIPRAP_ML_API_KEY  = <token>
+```
+
+`app/inference.py` posts to those endpoints; specialists fall back
+to local in-process model loads when the service is unreachable.
diff --git a/services/riprap-models/main.py b/services/riprap-models/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..38fd1bdfafa33ec955929b0186bbff8fb4a55eb6
--- /dev/null
+++ b/services/riprap-models/main.py
@@ -0,0 +1,913 @@
+"""Riprap Models — GPU inference microservice.
+
+Runs on the AMD MI300X droplet alongside vLLM and exposes one HTTP
+endpoint per model class consumed by the Riprap FastAPI app's
+specialists. The local app routes through this service when
+RIPRAP_ML_BACKEND=remote (or =auto with the service reachable),
+keeping all GPU-accelerable forward passes on the MI300X — Granite
+4.1 (LLM), Prithvi-NYC-Pluvial (segmentation), TerraMind LULC +
+Buildings + Synthesis (LoRA), Granite TTM r2 (forecasts), Granite
+Embedding 278M (RAG), and GLiNER (typed extraction).
+
+Bearer-token auth in the same shape as vLLM, and the same env-var
+shape, so one secret can be reused across both services on a Space.
+
+Service contract (mirrors app/inference.py):
+
+    GET  /healthz            → {ok: true, models_loaded: [...]}
+    POST /v1/prithvi-pluvial → see _prithvi_pluvial below
+    POST /v1/terramind       → adapter dispatch (lulc/buildings/synth)
+    POST /v1/ttm-forecast    → model dispatch (zero_shot_battery, ...)
+    POST /v1/granite-embed   → batch text → 768-d vectors
+    POST /v1/gliner-extract  → text + labels → typed entities
+
+Model loading is lazy + cached per-process. The first call to a given
+model pays the cold-load cost (~5-30 s); subsequent calls reuse the
+in-memory instance. ROCm device binding goes through torch's CUDA
+shim — `cuda` is the ROCm device when running on a ROCm-built torch.
+"""
+from __future__ import annotations
+
+import base64
+import logging
+import os
+import threading
+import time
+from contextlib import asynccontextmanager
+from typing import Any
+
+import numpy as np
+from fastapi import Depends, FastAPI, Header, HTTPException
+from pydantic import BaseModel
+
+log = logging.getLogger("riprap.models")
+logging.basicConfig(
+    level=os.environ.get("RIPRAP_MODELS_LOG", "INFO").upper(),
+    format="%(asctime)s %(levelname)-5s %(name)s: %(message)s",
+)
+
+# Auth — same shape as vLLM. Set RIPRAP_MODELS_API_KEY in the
+# `docker run` env. When empty, the service runs unauthenticated
+# (only sane for localhost-only deployments).
+_AUTH_TOKEN = os.environ.get("RIPRAP_MODELS_API_KEY", "")
+
+# Device. ROCm-built torch reports CUDA-style symbols; "cuda" maps to
+# the first ROCm device on the MI300X.
+_DEVICE = os.environ.get("RIPRAP_MODELS_DEVICE", "cuda") + + +def _require_auth(authorization: str | None = Header(default=None)) -> None: + if not _AUTH_TOKEN: + return + if not authorization or not authorization.startswith("Bearer "): + raise HTTPException(status_code=401, detail="Missing bearer token") + if authorization[7:].strip() != _AUTH_TOKEN: + raise HTTPException(status_code=401, detail="Invalid bearer token") + + +# ---- Lazy model singletons -------------------------------------------------- +# +# Each model has a `_load_()` that returns the in-memory instance +# (locking on a per-model threading.Lock so concurrent first-call +# requests don't double-load). Callers grab via `_get_()`. + +_LOCKS = { + "prithvi": threading.Lock(), + "terramind_lulc": threading.Lock(), + "terramind_buildings": threading.Lock(), + "terramind_synth": threading.Lock(), + "ttm": threading.Lock(), + "granite_embed": threading.Lock(), + "gliner": threading.Lock(), +} +_INSTANCES: dict[str, Any] = {} + + +def _decode_array(b64: str, shape: list[int], dtype: str = "float32") -> np.ndarray: + raw = base64.b64decode(b64) + return np.frombuffer(raw, dtype=dtype).reshape(shape) + + +def _to_device(t): + """Move a torch tensor to the configured device. No-op for CPU.""" + if _DEVICE == "cpu": + return t + try: + import torch + if torch.cuda.is_available(): + return t.to("cuda") + except Exception as e: + log.warning("device move skipped: %s", e) + return t + + +# ---- Prithvi-NYC-Pluvial v2 ------------------------------------------------- + +def _load_prithvi(): + if "prithvi" in _INSTANCES: + return _INSTANCES["prithvi"] + with _LOCKS["prithvi"]: + if "prithvi" in _INSTANCES: + return _INSTANCES["prithvi"] + log.info("prithvi: cold load (msradam/Prithvi-EO-2.0-NYC-Pluvial)") + import importlib.util + + from huggingface_hub import hf_hub_download + from terratorch.cli_tools import LightningInferenceModel + + BASE_REPO = "ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL-Sen1Floods11" + V2_REPO = "msradam/Prithvi-EO-2.0-NYC-Pluvial" + + # Use the IBM-NASA base config + v2 ckpt. Mirrors + # app/flood_layers/prithvi_live.py:_ensure_model(). + base_config = hf_hub_download(BASE_REPO, "config.yaml") + inference_py = hf_hub_download(BASE_REPO, "inference.py") + + v2_yaml = None + v2_ckpt = None + for name in ("prithvi_nyc_phase14.yaml", "config.yaml"): + try: + v2_yaml = hf_hub_download(V2_REPO, name); break + except Exception: + continue + for name in ("prithvi_nyc_pluvial_v2.ckpt", "best_val_loss.ckpt", "model.ckpt"): + try: + v2_ckpt = hf_hub_download(V2_REPO, name); break + except Exception: + continue + if v2_yaml and v2_ckpt: + log.info("prithvi: building from v2 yaml=%s ckpt=%s", v2_yaml, v2_ckpt) + m = LightningInferenceModel.from_config(v2_yaml, v2_ckpt) + # prithvi_nyc_phase14.yaml uses GenericNonGeoSegmentationDataModule + # which omits test_transform (→ None). IBM inference.py:run_model() + # calls it on a 3D image dict; patch to match the IBM base contract. + if getattr(getattr(m, 'datamodule', None), + 'test_transform', None) is None: + import albumentations as A + import torch as _torch + from albumentations.pytorch import ToTensorV2 + m.datamodule.test_transform = A.Compose([ToTensorV2()]) + _old = m.datamodule.aug + + # IBM's inference.py:188 calls + # `datamodule.aug({'image': tensor})['image']` — + # passing a dict and indexing the result. 
The previous + # patch wrapped a kornia AugmentationSequential here, + # which doesn't natively accept dict input and tripped + # `'list' object has no attribute 'view'` deep inside + # kornia's internal storage on first inference. Drop + # kornia entirely and use a hand-rolled dict-aware + # normalizer — fewer moving parts, identical math. + class _DictNormalize: + def __init__(self, mean, std): + self.mean = _torch.as_tensor(mean).view(-1, 1, 1).float() + self.std = _torch.as_tensor(std).view(-1, 1, 1).float() + + def __call__(self, sample): + if isinstance(sample, dict): + img = sample["image"] + mean = self.mean.to(img.device) + std = self.std.to(img.device) + return {**sample, "image": (img - mean) / std} + mean = self.mean.to(sample.device) + std = self.std.to(sample.device) + return (sample - mean) / std + + # `_old.means` / `_old.stds` come from the yaml as + # Python lists — calling `.view()` on them is what + # tripped the original `'list' object has no attribute + # 'view'`. _DictNormalize handles the conversion via + # torch.as_tensor internally; just pass the raw values + # whatever their type. + m.datamodule.aug = _DictNormalize(_old.means, _old.stds) + log.info("prithvi: patched v2 datamodule transforms " + "for IBM inference.py compat (dict-aware Normalize)") + else: + log.info("prithvi: v2 unavailable, falling back to base") + base_ckpt = hf_hub_download( + BASE_REPO, "Prithvi-EO-V2-300M-TL-Sen1Floods11.pt") + m = LightningInferenceModel.from_config(base_config, base_ckpt) + m.model.eval() + try: + import torch + if _DEVICE == "cuda" and torch.cuda.is_available(): + m.model.cuda() + except Exception: + log.exception("prithvi: cuda move failed; staying on cpu") + + spec = importlib.util.spec_from_file_location("_prithvi_inference", + inference_py) + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + _INSTANCES["prithvi"] = (m, mod.run_model) + log.info("prithvi: ready") + return _INSTANCES["prithvi"] + + +class PrithviIn(BaseModel): + s2: str + shape: list[int] + scene_id: str | None = None + scene_datetime: str | None = None + cloud_cover: float | None = None + + +def _prithvi_pluvial(payload: PrithviIn) -> dict[str, Any]: + t0 = time.time() + m, _run_model = _load_prithvi() + chip = _decode_array(payload.s2, payload.shape, "float32") + # Sen1Floods11 expects [1, 6, 1, H, W] + if chip.ndim == 3: + chip = chip[None, :, None, :, :] + elif chip.ndim == 4: + chip = chip[:, :, None, :, :] # (1, C, H, W) → (1, C, 1, H, W) + + # Bypass IBM's run_model entirely: it's a sliding-window helper + # designed for full-scene inference, and its dependence on + # datamodule.test_transform / datamodule.aug producing a specific + # tensor shape kept tripping us up on the v2 fine-tune (yaml-typed + # means / stds, dict-input vs tensor-input contract). Our chip is + # already exactly model resolution and on-device — just normalise, + # forward, argmax. Same math, no version-skew surface. + import torch as _torch + chip_t = _torch.from_numpy(chip).float() + chip_t = _to_device(chip_t) + + # Means / stds from prithvi_nyc_phase14.yaml (the v2 training + # config). Same six bands in the same order as the chip + # (BLUE, GREEN, RED, NARROW_NIR, SWIR_1, SWIR_2). 
+ means_t = _torch.tensor( + [0.107, 0.107, 0.115, 0.265, 0.235, 0.155], + device=chip_t.device, dtype=chip_t.dtype, + ).view(1, 6, 1, 1, 1) + stds_t = _torch.tensor( + [0.082, 0.075, 0.085, 0.115, 0.11, 0.1], + device=chip_t.device, dtype=chip_t.dtype, + ).view(1, 6, 1, 1, 1) + x = (chip_t - means_t) / stds_t + + with _torch.no_grad(): + out = m.model(x) + logits = out.output if hasattr(out, "output") else out + + # logits shape (B, num_classes, H, W) for segmentation. Argmax → + # (B, H, W) class indices. + pred = logits.argmax(dim=1).squeeze(0).cpu().numpy().astype("uint8") + pct_full = float(100.0 * pred.mean()) + # Center-disk fraction (500 m at 10 m/px → 50 px radius from chip center). + h, w = pred.shape + yy, xx = np.indices(pred.shape) + cy, cx = h // 2, w // 2 + dist = np.sqrt((yy - cy) ** 2 + (xx - cx) ** 2) + mask = dist <= min(50, min(h, w) // 4) + pct_500m = float(100.0 * pred[mask].mean()) if mask.any() else pct_full + # Pass the raw prediction raster back so HF can vectorise it into + # GeoJSON for the map layer using the chip-georef it already has + # locally (ref_da from _build_chip). uint8 is small enough for a + # base64 round-trip (~50 KB at 224x224). + pred_b64 = base64.b64encode(pred.tobytes()).decode("ascii") + return { + "ok": True, + "elapsed_s": round(time.time() - t0, 2), + "device": _DEVICE, + "pct_water_within_500m": round(pct_500m, 3), + "pct_water_full": round(pct_full, 3), + "scene_id": payload.scene_id, + "scene_datetime": payload.scene_datetime, + "cloud_cover": payload.cloud_cover, + "shape": [int(h), int(w)], + "pred_b64": pred_b64, + "pred_shape": [int(h), int(w)], + } + + +# ---- TerraMind (lulc / buildings / synthesis) ------------------------------- + +_TERRAMIND_REPO = "msradam/TerraMind-NYC-Adapters" +_TERRAMIND_SPECS = { + "lulc": {"subdir": "lulc_nyc", "num_classes": 5, + "labels": ["Trees", "Cropland", "Built", "Bare", "Water"]}, + "buildings": {"subdir": "buildings_nyc", "num_classes": 2, + "labels": ["Background", "Building"]}, + # Synthesis is the IBM/NASA base TerraMind generative path + # (DEM -> LULC), not a NYC fine-tune. Listed here so the same + # /v1/terramind dispatch handles it. + "synthesis": {"subdir": None, "num_classes": None, + "labels": ["Water", "Trees", "Grass", "Flooded vegetation", + "Crops", "Scrub/Shrub", "Built", "Bare ground", + "Snow/Ice", "Clouds"]}, +} +_TERRAMIND_SYNTH_TIMESTEPS = int(os.environ.get( + "RIPRAP_TERRAMIND_SYNTH_TIMESTEPS", "10")) + + +def _load_terramind_synthesis(): + """Load the IBM/NASA base TerraMind v1 generative path + (DEM -> LULC) once. 
Different machinery from the LoRA adapters:
+    pulled via terratorch's FULL_MODEL_REGISTRY rather than
+    SemanticSegmentationTask + LoRA injection."""
+    key = "terramind_synthesis"
+    if key in _INSTANCES:
+        return _INSTANCES[key]
+    # Note: the dedicated lock in _LOCKS is keyed "terramind_synth",
+    # not "terramind_synthesis" — use it explicitly rather than the
+    # .get() fallback, which silently borrowed the lulc lock.
+    with _LOCKS["terramind_synth"]:
+        if key in _INSTANCES:
+            return _INSTANCES[key]
+        log.info("terramind/synthesis: cold load (v1 base generate)")
+        import terratorch.models.backbones.terramind.model.terramind_register  # noqa
+        from terratorch.registry import FULL_MODEL_REGISTRY
+        m = FULL_MODEL_REGISTRY.build(
+            "terratorch_terramind_v1_base_generate",
+            modalities=["DEM"],
+            output_modalities=["LULC"],
+            pretrained=True,
+            timesteps=_TERRAMIND_SYNTH_TIMESTEPS,
+        )
+        try:
+            import torch
+            if _DEVICE == "cuda" and torch.cuda.is_available():
+                m = m.to("cuda")
+        except Exception:
+            log.exception("terramind/synthesis: cuda move failed")
+        m.eval()
+        _INSTANCES[key] = m
+        log.info("terramind/synthesis: ready")
+        return m
+
+
+def _load_terramind(adapter: str):
+    if adapter == "synthesis":
+        return _load_terramind_synthesis()
+    key = f"terramind_{adapter}"
+    if key in _INSTANCES:
+        return _INSTANCES[key]
+    with _LOCKS.get(key, _LOCKS.get("terramind_lulc")):
+        if key in _INSTANCES:
+            return _INSTANCES[key]
+        log.info("terramind/%s: cold load", adapter)
+        from huggingface_hub import snapshot_download
+        from peft import LoraConfig, inject_adapter_in_model
+        from safetensors.torch import load_file
+        from terratorch.tasks import SemanticSegmentationTask
+
+        spec = _TERRAMIND_SPECS[adapter]
+        adapter_root = snapshot_download(
+            _TERRAMIND_REPO, allow_patterns=[f"{spec['subdir']}/*"])
+        task = SemanticSegmentationTask(
+            model_factory="EncoderDecoderFactory",
+            model_args=dict(
+                backbone="terramind_v1_base",
+                backbone_pretrained=True,
+                backbone_modalities=["S2L2A", "S1RTC", "DEM"],
+                backbone_use_temporal=True,
+                backbone_temporal_pooling="concat",
+                backbone_temporal_n_timestamps=4,
+                necks=[
+                    {"name": "SelectIndices", "indices": [2, 5, 8, 11]},
+                    {"name": "ReshapeTokensToImage", "remove_cls_token": False},
+                    {"name": "LearnedInterpolateToPyramidal"},
+                ],
+                decoder="UNetDecoder",
+                decoder_channels=[512, 256, 128, 64],
+                head_dropout=0.1,
+                num_classes=spec["num_classes"],
+            ),
+            loss="ce", lr=1e-4, freeze_backbone=False, freeze_decoder=False,
+        )
+        inject_adapter_in_model(LoraConfig(
+            r=16, lora_alpha=32, lora_dropout=0.05,
+            target_modules=["attn.qkv", "attn.proj"], bias="none",
+        ), task.model.encoder)
+        adapter_dir = f"{adapter_root}/{spec['subdir']}"
+        lora = load_file(f"{adapter_dir}/adapter_model.safetensors")
+        head = load_file(f"{adapter_dir}/decoder_head.safetensors")
+        task.model.encoder.load_state_dict(
+            {k.removeprefix("encoder."): v for k, v in lora.items()
+             if k.startswith("encoder.")}, strict=False)
+        for sub in ("decoder", "neck", "head", "aux_heads"):
+            ss = {k[len(sub) + 1:]: v for k, v in head.items()
+                  if k.startswith(sub + ".")}
+            if ss and hasattr(task.model, sub):
+                getattr(task.model, sub).load_state_dict(ss, strict=False)
+        try:
+            import torch
+            if _DEVICE == "cuda" and torch.cuda.is_available():
+                task = task.to("cuda")
+        except Exception:
+            log.exception("terramind: cuda move failed")
+        task.eval()
+        _INSTANCES[key] = task
+        log.info("terramind/%s: ready", adapter)
+        return task
+
+
+class TerramindIn(BaseModel):
+    adapter: str  # "lulc" | "buildings" | "synthesis"
+    # All modality fields optional — `synthesis` adapter only needs DEM,
+    # while lulc / buildings need at minimum S2L2A.
+ s2: str | None = None + s2_shape: list[int] | None = None + s1: str | None = None + s1_shape: list[int] | None = None + dem: str | None = None + dem_shape: list[int] | None = None + + +def _build_chip_tensor(np_arr, n_timesteps: int = 4): + """Normalize any incoming chip shape into TerraMind's expected + (B, C, T, H, W). The HF Space's eo_chip_cache hands us a chip that + is already (B, C, T, H, W) 5-D — pass through. Older callers that + send a single-timestep (C, H, W) get expanded to T=4 by repetition; + a (C, T, H, W) gets just the batch dim added.""" + import torch + t = torch.from_numpy(np_arr).float() + if t.ndim == 5: + return t # (B, C, T, H, W) + if t.ndim == 4: + return t.unsqueeze(0) # (C, T, H, W) -> (1, C, T, H, W) + if t.ndim == 3: + t = t.unsqueeze(1) # (C, H, W) -> (C, 1, H, W) + if t.shape[1] == 1: + t = t.repeat(1, n_timesteps, 1, 1) # repeat single timestep + return t.unsqueeze(0) # add batch dim + raise ValueError(f"unexpected chip shape {tuple(t.shape)}") + + +def _terramind_synthesis_inference(payload: TerramindIn) -> dict[str, Any]: + """DEM -> LULC generative path. Different machinery from the LoRA + adapters: model is the v1 base generate stack pulled from + terratorch's FULL_MODEL_REGISTRY, takes a single 4-D (B, 1, H, W) + DEM tensor, and emits a class-logit raster keyed by the ESRI + 2020 LULC tokenizer codebook.""" + t0 = time.time() + log.info("terramind/synthesis: payload dem=%s dem_shape=%s s2=%s", + "set" if payload.dem else "None", + payload.dem_shape, + "set" if payload.s2 else "None") + if not payload.dem or not payload.dem_shape: + log.warning("terramind/synthesis: missing dem (dem=%s, shape=%s)", + bool(payload.dem), payload.dem_shape) + raise HTTPException(status_code=400, + detail="synthesis requires `dem` + `dem_shape`") + model = _load_terramind_synthesis() + dem_np = _decode_array(payload.dem, payload.dem_shape) + + import numpy as np + import torch + dem_t = torch.from_numpy(dem_np).float() + # The v1 base generative encoder unpacks `B, C, H, W = x.shape` — + # 4-D required. DEM has C=1, so canonical shape is (1, 1, H, W). + # Verified empirically against terratorch_terramind_v1_base_generate. 
+ if dem_t.ndim == 2: + dem_t = dem_t.unsqueeze(0).unsqueeze(0) # (H, W) -> (1, 1, H, W) + elif dem_t.ndim == 3: + dem_t = dem_t.unsqueeze(0) # (1, H, W) -> (1, 1, H, W) + elif dem_t.ndim != 4: + raise HTTPException(status_code=400, + detail=f"unexpected DEM shape {tuple(dem_t.shape)}; " + f"expected 4-D (B, C, H, W)") + dem_t = _to_device(dem_t) + + spec = _TERRAMIND_SPECS["synthesis"] + with torch.no_grad(): + out = model({"DEM": dem_t}, + timesteps=_TERRAMIND_SYNTH_TIMESTEPS, + verbose=False) + lulc = out["LULC"] + if hasattr(lulc, "detach"): + lulc = lulc.detach().cpu().numpy() + if lulc.ndim == 4: + lulc = lulc[0] # (n_classes, H, W) + class_idx = lulc.argmax(axis=0) # (H, W) per-pixel class + unique, counts = np.unique(class_idx, return_counts=True) + total = float(class_idx.size) or 1.0 + fractions: dict[str, float] = {} + for u, c in zip(unique, counts): + u = int(u) + label = spec["labels"][u] if 0 <= u < len(spec["labels"]) else f"class_{u}" + fractions[label] = round(100.0 * c / total, 2) + ordered = dict(sorted(fractions.items(), + key=lambda kv: kv[1], reverse=True)) + dominant_class = next(iter(ordered)) if ordered else "unknown" + dominant_pct = ordered.get(dominant_class, 0.0) + pred_u8 = class_idx.astype("uint8") + pred_b64 = base64.b64encode(pred_u8.tobytes()).decode("ascii") + return { + "ok": True, + "adapter": "synthesis", + "elapsed_s": round(time.time() - t0, 2), + "device": _DEVICE, + "synthetic_modality": True, + "tim_chain": ["DEM", "LULC_synthetic"], + "diffusion_steps": _TERRAMIND_SYNTH_TIMESTEPS, + "class_fractions": ordered, + "dominant_class": dominant_class, + "dominant_pct": dominant_pct, + "n_classes_observed": len(ordered), + "shape": list(lulc.shape), + "n_pixels": int(class_idx.size), + "label_schema": "ESRI 2020-2022 Land Cover (tentative — TerraMind " + "tokenizer source confirms ESRI but not exact " + "label-to-index mapping)", + "pred_b64": pred_b64, + "pred_shape": [int(s) for s in pred_u8.shape], + "class_labels": _TERRAMIND_SPECS["synthesis"]["labels"], + } + + +def _terramind_inference(payload: TerramindIn) -> dict[str, Any]: + if payload.adapter not in _TERRAMIND_SPECS: + raise HTTPException(status_code=400, + detail=f"unknown adapter {payload.adapter!r}") + if payload.adapter == "synthesis": + return _terramind_synthesis_inference(payload) + t0 = time.time() + task = _load_terramind(payload.adapter) + spec = _TERRAMIND_SPECS[payload.adapter] + + if not payload.s2 or not payload.s2_shape: + raise HTTPException(status_code=400, + detail=f"adapter {payload.adapter!r} requires " + f"`s2` + `s2_shape`") + s2 = _decode_array(payload.s2, payload.s2_shape) + chips = {"S2L2A": _to_device(_build_chip_tensor(s2))} + if payload.s1 and payload.s1_shape: + s1 = _decode_array(payload.s1, payload.s1_shape) + chips["S1RTC"] = _to_device(_build_chip_tensor(s1)) + if payload.dem and payload.dem_shape: + dem = _decode_array(payload.dem, payload.dem_shape) + chips["DEM"] = _to_device(_build_chip_tensor(dem)) + + import torch + + def _forward(x): + out = task.model(x) + return out.output if hasattr(out, "output") else out + + # Call the model directly — same shape contract as the + # training-time inference at + # experiments/18_terramind_nyc_lora/shared/inference_ensemble.py: + # the canonical chip is already the model's native 224×224 input + # in (B, C, T, H, W) form, so terratorch's `tiled_inference` is + # unnecessary and was the cause of the "Expected size 12 but got + # size 2" 5-D handling regression we hit on the L4 deploy. 
+ # Tile only when the chip is bigger than the model resolution. + s2_t = chips["S2L2A"] + h_chip, w_chip = int(s2_t.shape[-2]), int(s2_t.shape[-1]) + with torch.no_grad(): + if h_chip == 224 and w_chip == 224: + logits = _forward(chips) + else: + from terratorch.tasks.tiled_inference import tiled_inference + + def _forward_tile(x, **_extra): + return _forward(x) + + logits = tiled_inference( + _forward_tile, chips, out_channels=spec["num_classes"], + h_crop=224, w_crop=224, h_stride=128, w_stride=128, + average_patches=True, blend_overlaps=True, padding="reflect", + ) + pred = logits.argmax(dim=1).squeeze(0).cpu().numpy().astype("uint8") + n = max(int(pred.size), 1) + fractions = { + spec["labels"][i]: round(100.0 * float((pred == i).sum()) / n, 2) + for i in range(spec["num_classes"]) + } + fractions = {k: v for k, v in fractions.items() if v > 0} + dom_idx = int(max(range(spec["num_classes"]), + key=lambda i: int((pred == i).sum()))) + + # Buildings: connected-component count (parity with local + # _summarize_buildings). The card subhead reads this — without it, + # the UI shows "0 distinct components". + n_components = None + if payload.adapter == "buildings": + try: + from scipy.ndimage import label + _, n_components = label((pred == 1).astype("uint8")) + n_components = int(n_components) + except Exception: + log.debug("terramind/buildings: scipy.ndimage unavailable") + + # Pass the per-pixel argmax raster back so HF can vectorise it. + pred_b64 = base64.b64encode(pred.tobytes()).decode("ascii") + return { + "ok": True, + "adapter": payload.adapter, + "elapsed_s": round(time.time() - t0, 2), + "device": _DEVICE, + "shape": list(pred.shape), + "n_pixels": int(pred.size), + "class_fractions": fractions, + "dominant_class": spec["labels"][dom_idx], + "dominant_pct": fractions.get(spec["labels"][dom_idx], 0.0), + # Buildings-specific stat (None when not the buildings adapter). + "pct_buildings": round(100.0 * float((pred == 1).sum()) / n, 2) + if payload.adapter == "buildings" else None, + "n_building_components": n_components, + "pred_b64": pred_b64, + "pred_shape": [int(s) for s in pred.shape], + "class_labels": spec["labels"], + } + + +# ---- Granite TTM r2 --------------------------------------------------------- + +_TTM_MODELS = { + "zero_shot_battery": "ibm-granite/granite-timeseries-ttm-r2", + "fine_tune_battery": "msradam/Granite-TTM-r2-Battery-Surge", + "weekly_311": "ibm-granite/granite-timeseries-ttm-r2", + "floodnet_recurrence": "ibm-granite/granite-timeseries-ttm-r2", +} + + +def _load_ttm(model_key: str): + key = f"ttm:{model_key}" + if key in _INSTANCES: + return _INSTANCES[key] + with _LOCKS["ttm"]: + if key in _INSTANCES: + return _INSTANCES[key] + log.info("ttm/%s: cold load", model_key) + if model_key == "fine_tune_battery": + from huggingface_hub import snapshot_download + from tsfm_public import TinyTimeMixerForPrediction + local_dir = snapshot_download(_TTM_MODELS[model_key]) + m = TinyTimeMixerForPrediction.from_pretrained(local_dir).eval() + else: + from tsfm_public.toolkit.get_model import get_model + # Caller passes (context_length, prediction_length) — for the + # zero-shot & 311 & FloodNet specialists we let the toolkit + # pick the best matching pretrained config. Cache one per + # model_key to avoid duplicate loads. 
+ m = get_model(_TTM_MODELS[model_key], + context_length=512, prediction_length=96).eval() + try: + import torch + if _DEVICE == "cuda" and torch.cuda.is_available(): + m = m.to("cuda") + except Exception: + log.exception("ttm: cuda move failed") + _INSTANCES[key] = m + log.info("ttm/%s: ready", model_key) + return m + + +class TtmIn(BaseModel): + model: str # zero_shot_battery | fine_tune_battery | weekly_311 | floodnet_recurrence + history: list[float] + context_length: int + prediction_length: int + cadence: str = "h" + + +def _ttm_forecast(payload: TtmIn) -> dict[str, Any]: + t0 = time.time() + if payload.model not in _TTM_MODELS: + raise HTTPException(status_code=400, + detail=f"unknown model {payload.model!r}") + m = _load_ttm(payload.model) + import torch + series = np.array(payload.history, dtype="float32") + if len(series) < payload.context_length: + # Front-pad with the leading value so the model gets the right + # shape — caller-side fills are NaN-clean already, so this only + # extends a series whose history is shorter than context. + pad = np.full(payload.context_length - len(series), series[0] + if len(series) else 0.0, dtype="float32") + series = np.concatenate([pad, series]) + series = series[-payload.context_length:] + x = torch.from_numpy(series).float().unsqueeze(0).unsqueeze(-1) + x = _to_device(x) + with torch.no_grad(): + out = m(past_values=x) + fc = out.prediction_outputs.squeeze(-1).squeeze(0).cpu().numpy() + peak_idx = int(np.argmax(np.abs(fc))) + return { + "ok": True, + "model": payload.model, + "elapsed_s": round(time.time() - t0, 2), + "device": _DEVICE, + "context_length": payload.context_length, + "prediction_length": payload.prediction_length, + "cadence": payload.cadence, + "forecast": [round(float(v), 6) for v in fc.tolist()], + "peak_index": peak_idx, + "peak_value": round(float(fc[peak_idx]), 6), + } + + +# ---- Granite Embedding 278M ------------------------------------------------- + +_EMBED_REPO = "ibm-granite/granite-embedding-278m-multilingual" + + +def _load_embed(): + if "granite_embed" in _INSTANCES: + return _INSTANCES["granite_embed"] + with _LOCKS["granite_embed"]: + if "granite_embed" in _INSTANCES: + return _INSTANCES["granite_embed"] + log.info("granite-embed: cold load") + from sentence_transformers import SentenceTransformer + m = SentenceTransformer(_EMBED_REPO, + device="cuda" if _DEVICE == "cuda" else "cpu") + _INSTANCES["granite_embed"] = m + log.info("granite-embed: ready") + return m + + +class EmbedIn(BaseModel): + texts: list[str] + + +def _granite_embed(payload: EmbedIn) -> dict[str, Any]: + t0 = time.time() + m = _load_embed() + vecs = m.encode(payload.texts, normalize_embeddings=True, + show_progress_bar=False) + return { + "ok": True, + "elapsed_s": round(time.time() - t0, 2), + "device": _DEVICE, + "n": len(payload.texts), + "dim": int(vecs.shape[-1]) if hasattr(vecs, "shape") else len(vecs[0]), + "vectors": [list(map(float, v)) for v in vecs], + } + + +# ---- GLiNER ---------------------------------------------------------------- + +_GLINER_REPO = "urchade/gliner_medium-v2.1" + + +def _load_gliner(): + if "gliner" in _INSTANCES: + return _INSTANCES["gliner"] + with _LOCKS["gliner"]: + if "gliner" in _INSTANCES: + return _INSTANCES["gliner"] + log.info("gliner: cold load") + from gliner import GLiNER + m = GLiNER.from_pretrained(_GLINER_REPO) + try: + import torch + if _DEVICE == "cuda" and torch.cuda.is_available(): + m = m.to("cuda") + except Exception: + log.exception("gliner: cuda move failed") + _INSTANCES["gliner"] = m 
+ log.info("gliner: ready") + return m + + +class GlinerIn(BaseModel): + text: str + labels: list[str] + + +def _gliner_extract(payload: GlinerIn) -> dict[str, Any]: + t0 = time.time() + m = _load_gliner() + ents = m.predict_entities(payload.text, payload.labels) + return { + "ok": True, + "elapsed_s": round(time.time() - t0, 2), + "device": _DEVICE, + "entities": [ + {"label": e["label"], "text": e["text"], + "start": int(e.get("start", 0)), "end": int(e.get("end", 0)), + "score": float(e.get("score", 0))} + for e in ents + ], + } + + +# ---- FastAPI app ------------------------------------------------------------ + +# Last error per route, kept on the in-memory map so /v1/diag can +# expose it without forcing the operator to grep container logs. +_LAST_ERR: dict[str, dict[str, Any]] = {} + + +def _safe_route(stage: str, fn, payload): + """Wrap a route body so an uncaught exception becomes a structured + `{"ok": False, "err": "...", "stage": "..."}` JSON response with + HTTP 200 instead of FastAPI's opaque "Internal Server Error" body. + + The proxy on :7860 forwards this body untouched, so the FSM + specialist surfaces the real reason in the trace card. Logs the + full traceback to stderr so operators can still root-cause from + the Space's runtime logs.""" + try: + return fn(payload) + except HTTPException: + raise + except Exception as e: # noqa: BLE001 + import traceback + tb = traceback.format_exc() + log.error("route %s failed: %s\n%s", stage, e, tb) + info = { + "ok": False, + "err": f"{type(e).__name__}: {e}", + "stage": stage, + "ts": time.time(), + } + _LAST_ERR[stage] = {**info, "traceback_tail": tb.splitlines()[-3:]} + return info + + +@asynccontextmanager +async def lifespan(_app: FastAPI): + log.info("riprap-models starting on device=%s auth=%s", + _DEVICE, "yes" if _AUTH_TOKEN else "no") + # Pre-load the heavy models so the first user request doesn't + # collide with a cold-load on the same GPU as vLLM. Each warm + # is best-effort: a single model failing must not block the + # service from starting (others may still serve). + if os.environ.get("RIPRAP_MODELS_WARM_AT_STARTUP", "1").lower() in ("1", "true", "yes"): + for stage, fn in ( + ("warm/prithvi", _load_prithvi), + ("warm/terramind_synthesis", _load_terramind_synthesis), + ("warm/terramind_lulc", lambda: _load_terramind("lulc")), + ("warm/terramind_buildings", lambda: _load_terramind("buildings")), + ("warm/embed", _load_embed), + ("warm/gliner", _load_gliner), + ): + try: + fn() + log.info("startup %s ok", stage) + except Exception as e: # noqa: BLE001 + import traceback + tb = traceback.format_exc() + log.exception("startup %s failed: %s", stage, e) + _LAST_ERR[stage] = { + "ok": False, + "err": f"{type(e).__name__}: {e}", + "stage": stage, + "traceback_tail": tb.splitlines()[-5:], + } + yield + log.info("riprap-models stopping") + + +app = FastAPI(title="riprap-models", version="0.5.1", lifespan=lifespan) + + +@app.get("/healthz") +def healthz(): + return {"ok": True, "device": _DEVICE, + "models_loaded": sorted(_INSTANCES.keys()), + "last_errors": _LAST_ERR} + + +@app.get("/v1/diag", dependencies=[Depends(_require_auth)]) +def diag(): + """Operator-only diagnostic snapshot — what's loaded, last + per-stage error (with a 3-line traceback tail), and CUDA + visibility. 
The proxy forwards this through the catch-all so + operators can hit it from outside the Space.""" + cuda = {"available": False, "devices": []} + try: + import torch + cuda["available"] = bool(torch.cuda.is_available()) + if cuda["available"]: + cuda["devices"] = [{ + "name": torch.cuda.get_device_name(i), + "mem_total_mb": torch.cuda.get_device_properties(i).total_memory // (1024 * 1024), + "mem_alloc_mb": torch.cuda.memory_allocated(i) // (1024 * 1024), + } for i in range(torch.cuda.device_count())] + except Exception as e: # noqa: BLE001 + cuda["err"] = f"{type(e).__name__}: {e}" + return { + "device": _DEVICE, + "models_loaded": sorted(_INSTANCES.keys()), + "last_errors": _LAST_ERR, + "cuda": cuda, + } + + +@app.post("/v1/prithvi-pluvial", dependencies=[Depends(_require_auth)]) +def prithvi_pluvial_route(payload: PrithviIn): + return _safe_route("prithvi-pluvial", _prithvi_pluvial, payload) + + +@app.post("/v1/terramind", dependencies=[Depends(_require_auth)]) +def terramind_route(payload: TerramindIn): + return _safe_route(f"terramind/{payload.adapter}", + _terramind_inference, payload) + + +@app.post("/v1/ttm-forecast", dependencies=[Depends(_require_auth)]) +def ttm_forecast_route(payload: TtmIn): + return _safe_route("ttm-forecast", _ttm_forecast, payload) + + +@app.post("/v1/granite-embed", dependencies=[Depends(_require_auth)]) +def granite_embed_route(payload: EmbedIn): + return _safe_route("granite-embed", _granite_embed, payload) + + +@app.post("/v1/gliner-extract", dependencies=[Depends(_require_auth)]) +def gliner_extract_route(payload: GlinerIn): + return _safe_route("gliner-extract", _gliner_extract, payload) diff --git a/services/riprap-models/requirements-full.txt b/services/riprap-models/requirements-full.txt new file mode 100644 index 0000000000000000000000000000000000000000..1e08d70d9dbf02df1de3ac3dbc8a8810dc190c61 --- /dev/null +++ b/services/riprap-models/requirements-full.txt @@ -0,0 +1,65 @@ +# Riprap Models — full runtime requirements +# +# Pinned to the exact versions the bootstrap MI300X container ran with, +# captured via `pip freeze` inside the running `terramind` container on +# 2026-05-05. Keep these pins until something in the spec needs to +# change — the AMD ROCm + terratorch + tsfm_public stack has narrow +# version compatibility windows. +# +# Torch / torchvision / torchaudio are NOT pinned here because they +# come from the base image (rocm/pytorch ROCm 7.2.3 + torch 2.9.1 +# release). Pinning them again would cause pip to attempt a re-install +# of a different ABI and break the build. + +# ---- Core HF / transformers stack ---------------------------------------- +transformers==4.57.6 +peft==0.18.1 +accelerate==1.13.0 +safetensors>=0.4.5,<0.9 +huggingface_hub==0.36.2 +sentence-transformers==5.4.1 +gliner==0.2.26 + +# ---- IBM Granite TimeSeries TTM r2 (TTM forecast specialists) ------------ +granite-tsfm + +# ---- Prithvi-EO / TerraMind serving stack -------------------------------- +# terratorch pulls torchgeo, lightning, jsonargparse, kornia, timm, einops, +# albumentations, etc. Pinning the leaves explicitly so transitive bumps +# don't drift the FSM specialists' behaviour silently. 
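+#
+# To re-capture this block after a deliberate upgrade (sketch; assumes the
+# same `terramind` container that requirements.txt installs into):
+#
+#   docker exec terramind pip freeze | grep -iE 'terratorch|torchgeo|lightning'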
+terratorch==1.2.7 +torchgeo==0.9.0 +torchmetrics==1.9.0 +lightning==2.6.1 +jsonargparse==4.48.0 +albumentations==2.0.8 +albucore==0.0.24 +kornia==0.8.2 +timm==1.0.25 +einops==0.8.2 + +# ---- Geospatial I/O (used by the NYC-cropping helpers) ------------------- +rasterio==1.5.0 +pyproj==3.7.2 +geopandas==1.1.3 +shapely==2.1.2 +pystac==1.14.3 +pystac-client==0.9.0 +rioxarray==0.22.0 +xarray==2026.4.0 +tifffile==2026.5.2 +ImageIO==2.37.3 + +# ---- Numeric core -------------------------------------------------------- +numpy==2.4.4 +pandas==3.0.0 +scipy==1.17.1 +scikit-learn>=1.5,<1.8 +pillow==12.1.1 + +# ---- Web / IO ------------------------------------------------------------ +fastapi==0.135.1 +uvicorn==0.41.0 +pydantic==2.12.5 +httpx==0.28.1 +requests==2.32.5 diff --git a/services/riprap-models/requirements.txt b/services/riprap-models/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..7d75ef57e4ac8168ff4e8232191380840cc3094c --- /dev/null +++ b/services/riprap-models/requirements.txt @@ -0,0 +1,12 @@ +# Riprap Models — droplet inference service. +# +# Most heavy deps (torch+ROCm, terratorch, granite-tsfm, transformers, +# peft, safetensors, fastapi, uvicorn) are already in the `terramind` +# container's image. This list is only the deltas the service needs +# beyond that base — install with: +# +# docker exec terramind pip install -r /workspace/riprap-models/requirements.txt +fastapi-cli >= 0.0.5 +gliner >= 0.2.6 +sentence-transformers >= 5.0.0 +huggingface_hub >= 0.34 diff --git a/uv.lock b/uv.lock new file mode 100644 index 0000000000000000000000000000000000000000..7518fc90bf78b2b347b694f695c6e1ed120d523c --- /dev/null +++ b/uv.lock @@ -0,0 +1,3 @@ +version = 1 +revision = 3 +requires-python = ">=3.12" diff --git a/web/__init__.py b/web/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/web/main.py b/web/main.py new file mode 100644 index 0000000000000000000000000000000000000000..b3a9c33940c4a3745b6c1dced46a813eaf6a7bcb --- /dev/null +++ b/web/main.py @@ -0,0 +1,970 @@ +"""Riprap web UI — FastAPI + SSE streaming of the Burr FSM trace. + +Run: uvicorn web.main:app --reload --port 8000 +""" +from __future__ import annotations + +import json +import os +import warnings +from pathlib import Path + +warnings.filterwarnings("ignore") + +from fastapi import FastAPI, Request # noqa: E402 +from fastapi.responses import FileResponse, JSONResponse, StreamingResponse # noqa: E402 +from fastapi.staticfiles import StaticFiles # noqa: E402 + +from app import emissions # noqa: E402 +from app.context import floodnet # noqa: E402 +from app.flood_layers import dep_stormwater, sandy_inundation # noqa: E402 +from app.fsm import iter_steps # noqa: E402 +from app.stones import DATA_STONES # noqa: E402 +from app.stones import capstone as _capstone_stone # noqa: E402 + +# Map FSM step name -> Stone for the SSE stone_start / stone_done envelope. +# Steps not in this map (geocode, rag_granite_embedding, gliner_extract, +# nta_resolve and friends) don't open a Stone boundary — they're +# orientation / policy infrastructure shared across Stones. 
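+# Illustrative: a step event {"kind": "step", "step": "sandy_inundation"}
+# opens (or continues) the "Cornerstone" parent row below; "geocode" maps
+# to no Stone and streams as a bare step event.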
+_STEP_TO_STONE: dict[str, str] = {
+    # Cornerstone — single_address + polygon-aggregated (neighborhood)
+    "sandy_inundation": "Cornerstone",
+    "dep_stormwater": "Cornerstone",
+    "ida_hwm_2021": "Cornerstone",
+    "prithvi_eo_v2": "Cornerstone",
+    "microtopo_lidar": "Cornerstone",
+    "sandy_nta": "Cornerstone",
+    "dep_extreme_2080_nta": "Cornerstone",
+    "dep_moderate_2050_nta": "Cornerstone",
+    "dep_moderate_current_nta": "Cornerstone",
+    "microtopo_nta": "Cornerstone",
+    # Keystone (the chip fetch is infrastructure for the LoRA pair, but
+    # it's logically Keystone-adjacent and we surface it under that
+    # banner so the trace doesn't show a phantom orphan step).
+    "mta_entrance_exposure": "Keystone",
+    "nycha_development_exposure": "Keystone",
+    "doe_school_exposure": "Keystone",
+    "doh_hospital_exposure": "Keystone",
+    "terramind_synthesis": "Keystone",
+    "eo_chip_fetch": "Keystone",
+    "terramind_buildings": "Keystone",
+    # Touchstone
+    "floodnet": "Touchstone",
+    "nyc311": "Touchstone",
+    "nws_obs": "Touchstone",
+    "noaa_tides": "Touchstone",
+    "prithvi_eo_live": "Touchstone",
+    "terramind_lulc": "Touchstone",
+    "nyc311_nta": "Touchstone",
+    # Lodestone
+    "nws_alerts": "Lodestone",
+    "ttm_forecast": "Lodestone",
+    "ttm_311_forecast": "Lodestone",
+    "floodnet_forecast": "Lodestone",
+    "ttm_battery_surge": "Lodestone",
+    # Capstone — the reconciler step's name varies between strict and
+    # legacy paths; both map to Capstone.
+    "reconcile_granite41": "Capstone",
+    "mellea_reconcile_address": "Capstone",
+    "reconcile_neighborhood": "Capstone",
+    "reconcile_development": "Capstone",
+    "reconcile_live_now": "Capstone",
+}
+
+# Pretty-printed Stone metadata the frontend renders as parent-row labels.
+_STONE_META: dict[str, dict] = {
+    s.NAME: {"name": s.NAME, "tagline": s.TAGLINE,
+             "description": s.DESCRIPTION}
+    for s in DATA_STONES
+}
+_STONE_META[_capstone_stone.NAME] = {
+    "name": _capstone_stone.NAME,
+    "tagline": _capstone_stone.TAGLINE,
+    "description": _capstone_stone.DESCRIPTION,
+}
+
+ROOT = Path(__file__).resolve().parent
+STATIC = ROOT / "static"
+SVELTEKIT_BUILD = ROOT / "sveltekit" / "build"
+
+app = FastAPI(title="Riprap")
+app.mount("/static", StaticFiles(directory=STATIC), name="static")
+
+# SvelteKit static build (adapter-static). Serves the new design-system UI
+# from /, /q/sample, and /q/{query_id}. The legacy custom-element page
+# routes (/legacy, /single, /compare, /register/*) were retired in v0.4.5;
+# see the retirement note above the /api/register routes below.
+if SVELTEKIT_BUILD.exists():
+    app.mount("/_app", StaticFiles(directory=SVELTEKIT_BUILD / "_app"), name="sveltekit_assets")
+
+# Top-level static assets the SvelteKit build emits next to the HTML
+# entry points (favicon.svg / favicon.png / robots.txt). These would
+# fall through to the SPA fallback and 404 without explicit routes;
+# adapter-static expects them under /, not /_app.
+def _serve_build_asset(name: str):
+    p = SVELTEKIT_BUILD / name
+    if not p.exists():
+        return JSONResponse({"detail": "Not Found"}, status_code=404)
+    return FileResponse(p, headers={"Cache-Control": "public, max-age=86400"})
+
+
+@app.get("/favicon.svg", include_in_schema=False)
+def _favicon_svg():
+    return _serve_build_asset("favicon.svg")
+
+
+@app.get("/favicon.png", include_in_schema=False)
+def _favicon_png():
+    return _serve_build_asset("favicon.png")
+
+
+@app.get("/favicon.ico", include_in_schema=False)
+def _favicon_ico():
+    # No .ico in the build, but browsers still probe for it. Redirect-
+    # by-content to the PNG so the tab gets the dam mark either way.
+ return _serve_build_asset("favicon.png") + + +@app.get("/robots.txt", include_in_schema=False) +def _robots(): + return _serve_build_asset("robots.txt") + +import json as _json # noqa: E402 + +import geopandas as _gpd # noqa: E402 +from fastapi.responses import JSONResponse # noqa: E402 + +_LAYER_CACHE: dict = {} + + +def _clip_simplify(gdf, lat: float, lon: float, radius_m: float = 1500, + simplify_ft: float = 8, props_keep=None): + """Clip a NYC-wide layer to a small bbox around a point and simplify. + + Uses shapely's clip_by_rect (much faster than gpd.overlay on dense + polygons) and a pre-bbox-filter via .cx so we never touch geometries + outside the AOI. + """ + import shapely.geometry as sg + + pt = _gpd.GeoSeries([sg.Point(lon, lat)], crs="EPSG:4326").to_crs("EPSG:2263")[0] + half = radius_m * 3.281 + minx, miny, maxx, maxy = pt.x - half, pt.y - half, pt.x + half, pt.y + half + + sub = gdf.cx[minx:maxx, miny:maxy] + if sub.empty: + return {"type": "FeatureCollection", "features": []} + + clipped = sub.copy() + clipped["geometry"] = sub.geometry.clip_by_rect(minx, miny, maxx, maxy) + clipped = clipped[~clipped.geometry.is_empty & clipped.geometry.notna()] + if clipped.empty: + return {"type": "FeatureCollection", "features": []} + + clipped["geometry"] = clipped.geometry.simplify(simplify_ft, preserve_topology=True) + g = clipped.to_crs("EPSG:4326") + if props_keep is not None: + g = g[[c for c in g.columns if c in props_keep or c == "geometry"]] + else: + g = g[["geometry"]] + return _json.loads(g.to_json()) + + +@app.on_event("startup") +def _warm_caches(): + """Prime slow loads so the first user query doesn't pay the cold-cost penalty.""" + print("[startup] warming flood layers...", flush=True) + sandy_inundation.load() + for scen in ["dep_extreme_2080", "dep_moderate_2050", "dep_moderate_current"]: + dep_stormwater.load(scen) + print("[startup] flood layers ready", flush=True) + if os.environ.get("RIPRAP_NYCHA_REGISTERS", "0").lower() in ("1", "true", "yes"): + print("[startup] pre-loading register catalogs...", flush=True) + try: + # NYCHA + DOE schools read from pre-built JSON catalogs at + # data/registers/{nycha,schools}.json — sub-ms per query. + from app.registers._loader import load_register + n_nycha = len(load_register("nycha")) + n_schools = len(load_register("schools")) + print(f"[startup] catalogs ready: nycha={n_nycha} rows, " + f"schools={n_schools} rows", flush=True) + # DOH hospitals has no pre-built catalog (~150 entries; we + # read the GeoJSON directly and sample baked rasters per hit). + from app.registers import doh_hospitals as _r_hospitals + _r_hospitals._load_hospitals() + print("[startup] hospitals geojson loaded", flush=True) + except Exception as _e: + print(f"[startup] register warm failed (non-fatal): {_e}", flush=True) + print("[startup] warming RAG (Granite Embedding 278M + 5 PDFs)...", flush=True) + # RAG warm loads sentence-transformers, which on some HF Space rebuilds + # has hit transformers-lazy-import edge cases (CodeCarbonCallback). The + # Space *must* start even if RAG fails — the FSM still works without + # RAG citations (specialists deliver their own grounded data, and the + # rag step in fsm.py already handles `rag=[]` gracefully). Surface the + # failure loudly in logs but don't kill the app. 
+ try: + from app import rag + rag.warm() + print("[startup] RAG ready", flush=True) + except Exception as e: # noqa: BLE001 + print(f"[startup] RAG warm FAILED — continuing without RAG: " + f"{type(e).__name__}: {e}", flush=True) + import traceback + traceback.print_exc() + # Pre-import the heavy EO/ML stacks on the main thread so the + # parallel-fanout workers don't race each other on first + # import (sklearn's "partially initialized module" surfaces as a + # spurious ImportError when terratorch / tsfm_public both pull + # sklearn concurrently from worker threads). + # Warm the Ollama LLM models so the first user query doesn't pay a + # cold-load penalty (~70 s for the 3B planner, ~12 s for the 8B + # reconciler at Q4_K_M). Sets keep_alive to 24 h so they stay + # resident across queries. Both calls use num_ctx that matches the + # production call sites (Mellea's 4096), so Ollama's KV cache is + # pre-allocated at the right size and the first reconcile doesn't + # pay an extra grow-and-reinit cost. + if os.environ.get("RIPRAP_SKIP_LLM_WARM", "").lower() not in ("1", "true", "yes"): + print("[startup] warming Ollama models (granite4.1:3b + 8b)...", + flush=True) + try: + import httpx as _httpx + base = os.environ.get( + "OLLAMA_BASE_URL", + os.environ.get("OLLAMA_HOST", "http://localhost:11434"), + ) + if not base.startswith("http"): + base = "http://" + base + keep_alive = os.environ.get("OLLAMA_KEEP_ALIVE", "24h") + num_ctx = int(os.environ.get("RIPRAP_MELLEA_NUM_CTX", "4096")) + for tag in (os.environ.get("RIPRAP_OLLAMA_3B_TAG", "granite4.1:3b"), + os.environ.get("RIPRAP_OLLAMA_8B_TAG", "granite4.1:8b")): + try: + r = _httpx.post( + base.rstrip("/") + "/api/generate", + json={ + "model": tag, + "prompt": "hi", + "stream": False, + "keep_alive": keep_alive, + "options": {"num_ctx": num_ctx, "num_predict": 1}, + }, + timeout=180, + ) + if r.status_code == 200: + load_s = r.json().get("load_duration", 0) / 1e9 + print(f"[startup] {tag} loaded " + f"(load_duration={load_s:.1f}s, " + f"keep_alive={keep_alive}, num_ctx={num_ctx})", + flush=True) + else: + print(f"[startup] {tag} warm failed " + f"({r.status_code})", flush=True) + except Exception as warm_err: + print(f"[startup] {tag} warm failed: {warm_err}", + flush=True) + except Exception as e: + print(f"[startup] LLM warm skipped: {e}", flush=True) + print("[startup] pre-importing terratorch + tsfm_public + transformers...", flush=True) + try: + import sklearn # noqa: F401 prime sklearn first + import terratorch # noqa: F401 + import tsfm_public # noqa: F401 + + # Transformers does lazy-loading via __getattr__; touching + # PreTrainedModel forces the lazy-init to complete on the main + # thread. Otherwise FSM worker threads race the lazy loader and + # surface ModuleNotFoundError("Could not import module + # 'PreTrainedModel'") under load. + from transformers import PreTrainedModel # noqa: F401 + + # tsfm_public's TinyTimeMixerForPrediction import path triggers + # the granite-tsfm side of the lazy chain — pre-warm here too. + from tsfm_public import TinyTimeMixerForPrediction # noqa: F401 + from tsfm_public.toolkit.get_model import get_model # noqa: F401 + except Exception as e: + print(f"[startup] heavy-EO pre-import skipped: {e}", flush=True) + # Force-import every specialist module that does heavy ML at runtime + # so its module-level deps probe + lazy transformers chain runs on + # the main thread, deterministic order, before any FSM worker fans + # out. 
Modules whose deps genuinely aren't installed will set their + # own `_DEPS_OK = False` here and gracefully no-op at request time; + # what we're avoiding is the "_DEPS_OK = False because of an import + # race" failure mode that fired on the live PS-188 query. + for mod_path in ( + "app.live.ttm_forecast", + "app.live.ttm_battery_surge", + "app.live.floodnet_forecast", + "app.context.gliner_extract", + "app.context.terramind_nyc", + "app.context.eo_chip_cache", + "app.flood_layers.prithvi_live", + ): + try: + __import__(mod_path) + except Exception as e: + print(f"[startup] {mod_path} pre-import skipped: " + f"{type(e).__name__}: {e}", flush=True) + # Warm the TerraMind specialist so first per-query call is just + # the diffusion (~3 s), not model load (~30 s). No-ops if deps + # are missing on this deployment. + try: + from app.context import terramind_synthesis + terramind_synthesis.warm() + print("[startup] TerraMind ready", flush=True) + except Exception as e: + print(f"[startup] TerraMind warm skipped: {e}", flush=True) + + +@app.get("/api/debug/eo") +def api_debug_eo(): + """Diagnostic for the EO toolchain (Phase 1 + Phase 4) on HF Spaces. + + Surfaces sys.path, PYTHONPATH, and per-module import status so we + can tell whether terratorch is actually findable from inside the + uvicorn process. Used to debug why the runtime --target install + appears to succeed in the entrypoint but isn't visible to the + FSM specialists at request time. + """ + import os + import sys + import traceback + from pathlib import Path + + out = { + "python_executable": sys.executable, + "python_version": sys.version, + "PYTHONPATH": os.environ.get("PYTHONPATH"), + "PYTHONNOUSERSITE": os.environ.get("PYTHONNOUSERSITE"), + "HOME": os.environ.get("HOME"), + "sys.path": sys.path, + } + eo_dir = Path(os.environ.get("HOME", "/home/user")) / ".eo-pkgs" + out["eo_dir"] = str(eo_dir) + out["eo_dir_exists"] = eo_dir.exists() + if eo_dir.exists(): + out["eo_dir_contents"] = sorted(p.name for p in eo_dir.iterdir())[:50] + out["modules"] = {} + for name in ("terratorch", "einops", "diffusers", "timm", + "rasterio", "planetary_computer", "pystac_client"): + try: + mod = __import__(name) + out["modules"][name] = {"ok": True, + "file": getattr(mod, "__file__", "?")} + except Exception as e: + out["modules"][name] = {"ok": False, + "err": f"{type(e).__name__}: {e}", + "tb": traceback.format_exc().splitlines()[-3:]} + return JSONResponse(out) + + +@app.get("/api/backend") +async def api_backend(): + """Live LLM-backend descriptor for the UI's hardware badge. + + Returns the configured primary (vLLM/AMD or Ollama/local), plus a + quick reachability ping so the badge can show whether the primary is + actually answering or whether the Router is on the fallback path. + """ + import httpx + + from app import llm + info = llm.backend_info() + reachable = None + try: + if info["primary"] in ("vllm", "mlx") and info["vllm_base_url"]: + url = info["vllm_base_url"].rstrip("/") + "/models" + async with httpx.AsyncClient(timeout=2.5) as client: + r = await client.get(url, headers={"Authorization": "Bearer ping"}) + # vLLM and mlx_lm.server both return 200 on /v1/models when + # reachable; vLLM may return 401 with --api-key set. Either + # proves the server is up. Anything else = unreachable. 
+ reachable = r.status_code in (200, 401) + else: + url = info["ollama_base_url"].rstrip("/") + "/api/tags" + async with httpx.AsyncClient(timeout=2.5) as client: + r = await client.get(url) + reachable = r.status_code == 200 + except Exception: + reachable = False + info["reachable"] = reachable + info["effective_engine"] = ( + info["engine"] if reachable + else (info.get("fallback_engine") or "offline") + ) + return JSONResponse(info) + + +@app.get("/") +def index(): + """SvelteKit landing page (the new design-system UI).""" + sk = SVELTEKIT_BUILD / "index.html" + if sk.exists(): + return FileResponse(sk) + return JSONResponse( + {"error": "sveltekit build not present — run `cd web/sveltekit && npm run build`"}, + status_code=503, + ) + + +@app.get("/q/sample") +def q_sample_page(): + """The prerendered Red Hook demo briefing (no SSE).""" + sk = SVELTEKIT_BUILD / "q" / "sample.html" + if sk.exists(): + return FileResponse(sk) + return JSONResponse({"error": "sveltekit build not present"}, status_code=503) + + +@app.get("/q/{query_id}") +def q_query_page(query_id: str): # noqa: ARG001 — captured for the SPA router + """Live briefing route. Served by the SvelteKit SPA fallback (200.html); + the client opens an EventSource to /api/agent/stream.""" + sk = SVELTEKIT_BUILD / "200.html" + if sk.exists(): + return FileResponse(sk) + return JSONResponse({"error": "sveltekit build not present"}, status_code=503) + + +@app.get("/print/{query_id}") +def print_page(query_id: str): # noqa: ARG001 — captured by the SPA router + """Curated print artifact for a completed briefing. The client + hydrates from localStorage (key riprap:print:) and + auto-fires window.print() — no backend round-trip.""" + sk = SVELTEKIT_BUILD / "200.html" + if sk.exists(): + return FileResponse(sk) + return JSONResponse({"error": "sveltekit build not present"}, status_code=503) + + +# Legacy custom-element bundle routes (/legacy, /single, /compare, /agent, +# /report, /register/*) were retired in v0.4.5 — the SvelteKit UI fully +# subsumes them. Static assets at /static/* still mount in case anything +# external embeds them, but the page-level routes are gone. Hitting them +# now returns the framework default 404. + + +@app.get("/api/register/{asset_class}") +def api_register(asset_class: str): + """Return a pre-computed asset-class register.""" + if asset_class not in ("schools", "nycha", "mta_entrances"): + return JSONResponse({"error": f"unknown asset class {asset_class!r}"}, + status_code=404) + f = ROOT.parent / "data" / "registers" / f"{asset_class}.json" + if not f.exists(): + script = f"scripts/build_{asset_class}_register.py" + return JSONResponse( + {"error": f"register not built — run python {script}", + "rows": []}, + status_code=503, + ) + return JSONResponse(_json.loads(f.read_text()), + headers={"Cache-Control": "public, max-age=300"}) + + +@app.get("/api/compare") +async def compare_stream(a: str, b: str, request: Request): + """Two parallel FSM runs, results returned as a single SSE stream. 
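+
+    Illustrative wire framing (field names as emitted by the generator
+    below; payload contents vary by step):
+
+        event: step
+        data: {"kind": "step", "step": "geocode", "side": "a", ...}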
+ Each event is tagged with side="a" or side="b" so the client can + route updates to the correct panel.""" + import asyncio + import queue + + from app.fsm import iter_steps + + def gen_for_side(side: str, q_text: str, out_q): + try: + for ev in iter_steps(q_text): + ev["side"] = side + out_q.put(ev) + except Exception as e: + out_q.put({"side": side, "kind": "error", "err": str(e)}) + out_q.put({"side": side, "kind": "_done"}) + + out_q: queue.Queue[dict] = queue.Queue() + + def kick(): + # run both sides in parallel threads — each Burr Application owns + # its own state so this is safe, and Ollama with NUM_PARALLEL=2 + # serves both reconcile calls concurrently. + loop = asyncio.get_event_loop() + loop.run_in_executor(None, gen_for_side, "a", a, out_q) + loop.run_in_executor(None, gen_for_side, "b", b, out_q) + + async def event_stream(): + kick() + yield f"event: hello\ndata: {json.dumps({'a': a, 'b': b})}\n\n" + done = 0 + while done < 2: + try: + ev = await asyncio.to_thread(out_q.get, True, 1.0) + except Exception: + continue + if ev.get("kind") == "_done": + done += 1 + continue + if ev.get("kind") == "step": + yield f"event: step\ndata: {json.dumps(ev, default=str)}\n\n" + elif ev.get("kind") == "final": + yield f"event: final\ndata: {json.dumps(ev, default=str)}\n\n" + elif ev.get("kind") == "error": + yield f"event: error\ndata: {json.dumps(ev)}\n\n" + yield "event: done\ndata: {}\n\n" + + return StreamingResponse(event_stream(), media_type="text/event-stream", + headers={"Cache-Control": "no-cache", + "X-Accel-Buffering": "no"}) + + +@app.get("/api/stream") +async def stream(q: str, request: Request): + """Server-sent-events stream: each FSM action yields one event.""" + def gen(): + try: + yield f"event: hello\ndata: {json.dumps({'query': q})}\n\n" + for ev in iter_steps(q): + if ev["kind"] == "step": + yield f"event: step\ndata: {json.dumps(ev, default=str)}\n\n" + else: + yield f"event: final\ndata: {json.dumps(ev, default=str)}\n\n" + yield "event: done\ndata: {}\n\n" + except Exception as e: + yield f"event: error\ndata: {json.dumps({'err': str(e)})}\n\n" + + return StreamingResponse(gen(), media_type="text/event-stream", + headers={"Cache-Control": "no-cache", + "X-Accel-Buffering": "no"}) + + +def _run_compare(p, raw_query: str, out_q, i_addr) -> dict: + """Run the compare intent: execute the full single_address specialist + suite sequentially for each target, then merge the two paragraphs into + one Markdown document clearly labelled PLACE A and PLACE B. + + Sequential execution is required because the FSM uses thread-local hooks + (set_strict_mode, set_token_callback) — concurrent runs on the same + thread would corrupt the hooks. See app/intents/single_address.py. 
+ + Step events from each target are forwarded to out_q tagged with a + `target_label` key so the trace UI can optionally group them, but the + existing trace UI ignores unknown keys gracefully.""" + from app.intents import neighborhood as i_nbhd + from app.planner import Plan + + addr_targets = [t for t in p.targets if t.get("type") in ("address", "nta")] + if len(addr_targets) < 2: + # Fallback: only one (or zero) address extracted — run as single_address + return i_addr.run(p, raw_query, progress_q=out_q, strict=True) + + results = [] + for idx, target in enumerate(addr_targets[:2]): + label = "PLACE A" if idx == 0 else "PLACE B" + addr_text = target["text"] + + if out_q is not None: + # Wrap out_q to tag step events with the target label so the + # trace UI can optionally group them; token/mellea_attempt pass + # through untagged so the SvelteKit briefing buffer works. + _label = label + _q = out_q + class _TaggedQ: + def put(self, ev): + if ev.get("kind") == "step": + _q.put({**ev, "target_label": _label}) + else: + _q.put(ev) + effective_q = _TaggedQ() + else: + effective_q = None + + if target.get("type") == "nta": + sub_plan = Plan( + intent="neighborhood", + targets=[{"type": "nta", "text": addr_text}], + specialists=p.specialists, + rationale=p.rationale, + ) + result = i_nbhd.run(sub_plan, addr_text, progress_q=effective_q, strict=True) + else: + sub_plan = Plan( + intent="single_address", + targets=[{"type": "address", "text": addr_text}], + specialists=p.specialists, + rationale=p.rationale, + ) + result = i_addr.run(sub_plan, addr_text, progress_q=effective_q, strict=True) + results.append((label, addr_text, result)) + + # Merge: produce one paragraph with both place sections. + parts = [] + for label, addr_text, res in results: + para = (res.get("paragraph") or "").strip() + parts.append(f"## {label}: {addr_text}\n\n{para}") + merged_paragraph = "\n\n---\n\n".join(parts) + + # Combine Mellea metadata: sum attempts, union passed/failed. + def _merge_mellea(a, b): + def _lst(m, k): return m.get(k) or [] + return { + "rerolls": (a.get("rerolls") or 0) + (b.get("rerolls") or 0), + "n_attempts": (a.get("n_attempts") or 0) + (b.get("n_attempts") or 0), + "requirements_passed": list(set(_lst(a, "requirements_passed")) & set(_lst(b, "requirements_passed"))), + "requirements_failed": list(set(_lst(a, "requirements_failed") + _lst(b, "requirements_failed"))), + "requirements_total": max(a.get("requirements_total") or 0, b.get("requirements_total") or 0), + } + + mellea_a = results[0][2].get("mellea") or {} + mellea_b = results[1][2].get("mellea") or {} + + # Spread Place A's full specialist state into the return dict so + # adaptFinalToFindings can build evidence cards (TTM, TerraMind, Prithvi, + # Sandy, etc.) from the higher-risk location. Place B's live-state data + # is available via targets[].state for future per-location card rendering. + # Without this, _run_compare returned only paragraph/mellea/intent/targets + # and all fine-tuned model cards were silently suppressed (state keys + # missing → card builders returned null). 
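+    # Merged shape (sketch of the dict assembled below, layered on top of
+    # Place A's spread-in specialist state):
+    #   {"paragraph": "## PLACE A: ...\n\n---\n\n## PLACE B: ...",
+    #    "mellea": {...}, "intent": "compare",
+    #    "targets": [{"label": ..., "address": ..., "state": ...}, ...],
+    #    "tier": ...}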
+ out = {**results[0][2]} + out.update({ + "paragraph": merged_paragraph, + "mellea": _merge_mellea(mellea_a, mellea_b), + "intent": "compare", + "targets": [ + {"label": lbl, "address": addr, "state": res} + for lbl, addr, res in results + ], + "tier": results[0][2].get("tier"), + }) + return out + + +@app.get("/api/agent") +def api_agent(q: str): + """Agentic endpoint: take a natural-language query, plan it via + Granite 4.1, dispatch to the appropriate intent module, return the + full result as JSON. The Plan is included so callers can see the + agent's routing decision. + + All non-trivial reconciliation (single_address / neighborhood / + development_check) routes through Mellea-validated rejection + sampling against four grounding requirements. live_now stays on + streaming reconcile because outputs are short and the live signals + have low hallucination surface.""" + from app.intents import development_check as i_dev + from app.intents import live_now as i_live + from app.intents import neighborhood as i_nbhd + from app.intents import single_address as i_addr + from app.planner import plan as run_planner + tracker = emissions.Tracker() + emissions.install(tracker) + try: + p = run_planner(q) + if p.intent == "not_implemented": + return JSONResponse({ + "paragraph": p.rationale, + "mellea": {"rerolls": 0, "n_attempts": 0, + "requirements_passed": [], "requirements_failed": [], + "requirements_total": 0}, + "status": "not_implemented", + "emissions": tracker.summarize(), + }) + if p.intent == "compare": + out = _run_compare(p, q, None, i_addr) + elif p.intent == "development_check": + out = i_dev.run(p, q, strict=True) + elif p.intent == "neighborhood": + out = i_nbhd.run(p, q, strict=True) + elif p.intent == "live_now": + out = i_live.run(p, q) + else: + out = i_addr.run(p, q, strict=True) + out["emissions"] = tracker.summarize() + return JSONResponse(out) + finally: + emissions.install(None) + + +@app.get("/api/agent/stream") +async def api_agent_stream(q: str): + """SSE: emit `plan` once the planner finishes, then a `step` event per + finalized specialist, then `final` with the full result. 
The intent + runs in a thread; we marshal events through a queue.""" + import asyncio + import queue + out_q: queue.Queue[dict] = queue.Queue() + + tracker = emissions.Tracker() + + def runner(): + emissions.install(tracker) + try: + from app.intents import development_check as i_dev + from app.intents import live_now as i_live + from app.intents import neighborhood as i_nbhd + from app.intents import single_address as i_addr + from app.planner import plan as run_planner + + def _on_plan_token(delta: str): + out_q.put({"kind": "plan_token", "delta": delta}) + p = run_planner(q, on_token=_on_plan_token) + out_q.put({"kind": "plan", + "intent": p.intent, + "targets": p.targets, + "specialists": p.specialists, + "rationale": p.rationale}) + if p.intent == "not_implemented": + final = { + "paragraph": p.rationale, + "mellea": {"rerolls": 0, "n_attempts": 0, + "requirements_passed": [], + "requirements_failed": [], + "requirements_total": 0}, + "status": "not_implemented", + } + elif p.intent == "compare": + final = _run_compare(p, q, out_q, i_addr) + elif p.intent == "development_check": + final = i_dev.run(p, q, progress_q=out_q, strict=True) + elif p.intent == "neighborhood": + final = i_nbhd.run(p, q, progress_q=out_q, strict=True) + elif p.intent == "live_now": + final = i_live.run(p, q, progress_q=out_q) + else: + final = i_addr.run(p, q, progress_q=out_q, strict=True) + final["emissions"] = tracker.summarize() + out_q.put({"kind": "final", **final}) + except Exception as e: + out_q.put({"kind": "error", "err": str(e)}) + finally: + emissions.install(None) + out_q.put({"kind": "_done"}) + + async def event_stream(): + loop = asyncio.get_event_loop() + loop.run_in_executor(None, runner) + yield f"event: hello\ndata: {json.dumps({'query': q})}\n\n" + + # Stone-boundary envelope: track current Stone so we can wrap + # contiguous step events in stone_start / stone_done. step + # events whose name maps to None (geocode, rag, gliner) flow + # through without opening a Stone — those are orientation / + # ancillary, not part of any data-Stone group. + current_stone: str | None = None + stone_step_count: dict[str, int] = {} + + def _open(stone: str) -> str: + stone_step_count[stone] = 0 + payload = {**_STONE_META.get(stone, {"name": stone})} + return f"event: stone_start\ndata: {json.dumps(payload)}\n\n" + + def _close(stone: str) -> str: + payload = { + **_STONE_META.get(stone, {"name": stone}), + "n_steps": stone_step_count.get(stone, 0), + } + return f"event: stone_done\ndata: {json.dumps(payload)}\n\n" + + while True: + try: + ev = await asyncio.to_thread(out_q.get, True, 1.0) + except Exception: + continue + kind = ev.get("kind") + if kind == "_done": + break + + # First reconcile token implies the data-Stones are done + # and the Capstone has begun, even if the FSM step event + # for reconcile hasn't fired yet (it fires AFTER the + # generation finishes). Open Capstone here so the UI + # shows it lighting up while tokens stream. 
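+            # Net wire effect (sketch): ... step* → stone_done(<last data
+            # Stone>) → stone_start(Capstone) → token* →
+            # stone_done(Capstone) → final → done.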
+ if kind == "token" and current_stone != "Capstone": + if current_stone is not None: + yield _close(current_stone) + current_stone = "Capstone" + yield _open(current_stone) + + if kind == "step": + step_name = ev.get("step") or "" + stone = _STEP_TO_STONE.get(step_name) + if stone is not None: + if stone != current_stone: + if current_stone is not None: + yield _close(current_stone) + current_stone = stone + yield _open(current_stone) + stone_step_count[stone] = ( + stone_step_count.get(stone, 0) + 1) + + # `final` arrives after the Capstone has produced its + # paragraph. Close the Capstone before forwarding final + # so the trace cleanly reads: ... stone_done(Capstone), + # final, done. + if kind == "final" and current_stone is not None: + yield _close(current_stone) + current_stone = None + + yield f"event: {kind}\ndata: {json.dumps(ev, default=str)}\n\n" + + # Pipeline ended without a final (error / abort) — close any + # still-open Stone so the client doesn't render an unbounded + # parent row. + if current_stone is not None: + yield _close(current_stone) + yield "event: done\ndata: {}\n\n" + + return StreamingResponse(event_stream(), media_type="text/event-stream", + headers={"Cache-Control": "no-cache", + "X-Accel-Buffering": "no"}) + + +@app.get("/api/agent/plan") +def api_agent_plan(q: str): + """Just the plan, no execution. Useful for showing the agent's routing + decision before running specialists.""" + from app.planner import plan as run_planner + p = run_planner(q) + return JSONResponse({ + "intent": p.intent, + "targets": p.targets, + "specialists": p.specialists, + "rationale": p.rationale, + }) + + +@app.get("/api/layers/nta") +def layer_nta(code: str): + """Return the NTA polygon for a given NTA code as GeoJSON (EPSG:4326).""" + from app.areas import nta as nta_mod + g = nta_mod.load() + sub = g[g["nta2020"] == code][["nta2020", "ntaname", "boroname", "geometry"]] + if sub.empty: + return JSONResponse({"type": "FeatureCollection", "features": []}, status_code=404) + return JSONResponse(_json.loads(sub.to_json()), + headers={"Cache-Control": "public, max-age=3600"}) + + +@app.get("/api/layers/sandy_clipped") +def layer_sandy_clipped(code: str): + """Sandy inundation polygons clipped to an NTA bbox + simplified. 
+ Used by the agent map for neighborhood / development_check intents.""" + from app.areas import nta as nta_mod + from app.flood_layers import sandy_inundation + poly = nta_mod.polygon_for(code) + if poly is None: + return JSONResponse({"type": "FeatureCollection", "features": []}) + bounds = poly.bounds + cx, cy = (bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2 + # bbox half-extent in metres ~ half the polygon span × 111 km/deg + half_m = max((bounds[2] - bounds[0]), (bounds[3] - bounds[1])) / 2 * 111_000 + return JSONResponse(_clip_simplify(sandy_inundation.load(), cy, cx, half_m * 1.2), + headers={"Cache-Control": "public, max-age=600"}) + + +@app.get("/api/layers/dep_clipped") +def layer_dep_clipped(code: str, scenario: str = "dep_extreme_2080"): + """DEP scenario polygons clipped to an NTA bbox + simplified.""" + from app.areas import nta as nta_mod + from app.flood_layers import dep_stormwater + poly = nta_mod.polygon_for(code) + if poly is None: + return JSONResponse({"type": "FeatureCollection", "features": []}) + bounds = poly.bounds + cx, cy = (bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2 + half_m = max((bounds[2] - bounds[0]), (bounds[3] - bounds[1])) / 2 * 111_000 + return JSONResponse(_clip_simplify(dep_stormwater.load(scenario), cy, cx, half_m * 1.2, + props_keep={"Flooding_Category"}), + headers={"Cache-Control": "public, max-age=600"}) + + +@app.get("/api/layers/sandy") +def layer_sandy(lat: float, lon: float, r: float = 1500): + key = ("sandy", round(lat, 4), round(lon, 4), int(r)) + if key not in _LAYER_CACHE: + _LAYER_CACHE[key] = _clip_simplify(sandy_inundation.load(), lat, lon, r) + return JSONResponse(_LAYER_CACHE[key], + headers={"Cache-Control": "public, max-age=3600"}) + + +@app.get("/api/layers/dep_extreme_2080") +def layer_dep_2080(lat: float, lon: float, r: float = 1500): + key = ("dep2080", round(lat, 4), round(lon, 4), int(r)) + if key not in _LAYER_CACHE: + _LAYER_CACHE[key] = _clip_simplify( + dep_stormwater.load("dep_extreme_2080"), + lat, lon, r, props_keep={"Flooding_Category"}) + return JSONResponse(_LAYER_CACHE[key], + headers={"Cache-Control": "public, max-age=3600"}) + + +@app.get("/api/layers/prithvi_water") +def layer_prithvi_water(lat: float, lon: float, r: float = 1500): + """Prithvi-EO 2.0 (Sen1Floods11) satellite water mask, clipped to a + bbox around the address for performance.""" + key = ("prithvi", round(lat, 4), round(lon, 4), int(r)) + if key not in _LAYER_CACHE: + from app.flood_layers import prithvi_water as pw + gdf, _meta = pw._load() + if gdf is None: + return JSONResponse({"type": "FeatureCollection", "features": []}) + _LAYER_CACHE[key] = _clip_simplify(gdf, lat, lon, r, + props_keep=set(), + simplify_ft=4) + return JSONResponse(_LAYER_CACHE[key], + headers={"Cache-Control": "public, max-age=3600"}) + + +@app.get("/api/layers/ida_hwm") +def layer_ida_hwm(lat: float, lon: float, r: float = 1500): + """USGS Hurricane Ida 2021 high-water marks within radius_m of (lat, lon). + Returns GeoJSON FeatureCollection of Point features. 
No geopandas needed — + HWMs are already points so haversine filter is sufficient.""" + from app.flood_layers import ida_hwm as _ida + features = [] + for f in _ida._load(): + flon, flat = f["geometry"]["coordinates"] + d = _ida._haversine_m(lat, lon, flat, flon) + if d <= r: + p = f["properties"] + features.append({ + "type": "Feature", + "geometry": f["geometry"], + "properties": { + "hwm_id": p.get("hwm_id"), + "site_description": p.get("site_description"), + "elev_ft": p.get("elev_ft"), + "height_above_gnd_ft": p.get("height_above_gnd"), + "hwm_quality": p.get("hwm_quality"), + "waterbody": p.get("waterbody"), + "distance_m": round(d, 0), + }, + }) + return JSONResponse({"type": "FeatureCollection", "features": features}, + headers={"Cache-Control": "public, max-age=3600"}) + + +@app.get("/api/floodnet_near") +def floodnet_near(lat: float, lon: float, r: float = 1000): + sensors = floodnet.sensors_near(lat, lon, r) + ids = [s.deployment_id for s in sensors] + events = floodnet.flood_events_for(ids) + by_dep: dict = {} + for e in events: + by_dep.setdefault(e.deployment_id, []).append(e) + + features = [] + for s in sensors: + if s.lat is None or s.lon is None: + continue + evs = by_dep.get(s.deployment_id, []) + peak = max((e.max_depth_mm or 0 for e in evs), default=0) + features.append({ + "type": "Feature", + "geometry": {"type": "Point", "coordinates": [s.lon, s.lat]}, + "properties": { + "deployment_id": s.deployment_id, + "name": s.name, + "street": s.street, + "borough": s.borough, + "n_events_3y": len(evs), + "peak_depth_mm": peak, + }, + }) + return JSONResponse({"type": "FeatureCollection", "features": features}) diff --git a/web/static/agent.html b/web/static/agent.html new file mode 100644 index 0000000000000000000000000000000000000000..94e8743af86704d512067e3a99bb800b7d1d57f1 --- /dev/null +++ b/web/static/agent.html @@ -0,0 +1,523 @@ + + + + + +Riprap — agent + + + + + +
+<!-- [agent.html markup, elided; recoverable structure: masthead "Riprap ·
+     citation-grounded flood-exposure briefings for NYC", backend status
+     pill ("checking…"), query input with "Try:" sample-address chips, and
+     the three panels (trace, report, map) the agent client renders into] -->
+ + + + + + + diff --git a/web/static/agent.js b/web/static/agent.js new file mode 100644 index 0000000000000000000000000000000000000000..8cb5d84c038ab183ae717cf9ac63ed902fd6aedb --- /dev/null +++ b/web/static/agent.js @@ -0,0 +1,1430 @@ +// Riprap agent client — three-panel UI with live SSE streaming, intent- +// dispatched map, and structured report rendering. + +const $ = (s) => document.querySelector(s); + +const STEP_LABELS = { + // single_address chain (linear FSM) + geocode: ["Geocode (DCP Geosearch)", "address → lat/lon, BBL"], + sandy_inundation: ["Sandy Inundation (NYC OD)", "empirical 2012 extent"], + dep_stormwater: ["DEP Stormwater Maps", "pluvial scenarios + 2080 SLR"], + floodnet: ["FloodNet sensor network", "live ultrasonic depth sensors"], + nyc311: ["NYC 311 archive", "flood complaints in 200m"], + noaa_tides: ["NOAA Tides & Currents (live)", "Battery / Kings Pt / Sandy Hook"], + nws_alerts: ["NWS Public Alerts (live)", "active flood-relevant alerts"], + nws_obs: ["NWS METAR observation (live)", "nearest ASOS recent precipitation"], + ttm_forecast: ["Granite TTM r2 — surge nowcast", "9.6h forecast at the closest of Battery / Kings Pt / Sandy Hook"], + ttm_311_forecast: ["Granite TTM r2 — 311 forecast", "4-week per-address flood-complaint forecast (52w history)"], + floodnet_forecast: ["Granite TTM r2 — FloodNet forecast", "flood-event recurrence forecast at nearest FloodNet sensor"], + ttm_battery_surge: ["Granite TTM r2 — Battery surge (NYC fine-tune)", "96 h hourly surge nowcast at NOAA Battery (msradam/Granite-TTM-r2-Battery-Surge)"], + mta_entrance_exposure: ["MTA subway entrances", "subway-entrance exposure (point-in-polygon Sandy + DEP)"], + nycha_development_exposure: ["NYCHA developments", "NYCHA campus footprint × Sandy + DEP overlap %"], + doe_school_exposure: ["NYC DOE schools", "school-point exposure (Sandy + DEP)"], + doh_hospital_exposure: ["NYS DOH hospitals", "Article-28 hospital exposure (Sandy + DEP)"], + microtopo_lidar: ["LiDAR terrain (DEM + TWI + HAND)", "USGS 3DEP DEM + whitebox-workflows"], + ida_hwm_2021: ["Ida 2021 high-water marks", "USGS empirical post-event extent"], + prithvi_eo_v2: ["Prithvi-EO 2.0 (NASA/IBM)", "Sen1Floods11 satellite segmentation"], + prithvi_eo_live: ["Prithvi-EO 2.0 — live segmentation","fresh Sentinel-2 water mask at this address"], + terramind_synthesis: ["TerraMind 1.0 base — synthetic LULC", "DEM → ESRI Land Cover, any-to-any generative synthesis (IBM/ESA)"], + eo_chip_fetch: ["EO chip fetch (S2L2A + S1RTC + DEM)", "single-chip cache for the TerraMind-NYC LoRA family"], + terramind_lulc: ["TerraMind-NYC — LULC (live)", "5-class macro land-cover LoRA (msradam/TerraMind-NYC-Adapters)"], + terramind_buildings: ["TerraMind-NYC — Buildings (live)", "binary building-footprint LoRA (msradam/TerraMind-NYC-Adapters)"], + rag_granite_embedding: ["Granite Embedding 278M (RAG)", "policy corpus retrieval (+ Granite Reranker R2 if enabled)"], + gliner_extract: ["GLiNER typed extraction", "agencies, dollar amounts, projects, locations"], + reconcile_granite41: ["Granite 4.1 reconcile (local)", "document-grounded synthesis"], + // neighborhood + dev_check + nta_resolve: ["NTA polygon resolve", "name → NYC NTA 2020 polygon"], + sandy_nta: ["Sandy 2012, polygon-aggregated", "% of NTA inside 2012 inundation"], + dep_extreme_2080_nta: ["DEP Extreme-2080, polygon", "% of NTA in modeled flooding"], + dep_moderate_2050_nta: ["DEP Moderate-2050, polygon", "% of NTA in modeled flooding"], + dep_moderate_current_nta:["DEP Moderate-current, 
polygon", "% of NTA in modeled flooding"], + nyc311_nta: ["NYC 311, polygon-aggregated", "complaints inside polygon"], + microtopo_nta: ["LiDAR terrain, polygon", "median HAND/TWI + flood bands"], + rag_nta: ["Granite Embedding RAG (NTA)", "policy retrieval for the place"], + reconcile_neighborhood: ["Granite 4.1 reconcile (NTA)", "polygon-flavored briefing"], + // dev_check + dob_permits_nta: ["NYC DOB permits in polygon", "active NB / A1 / DM jobs ↔ flood layers"], + rag_dev: ["Granite Embedding RAG (dev)", "policy on new construction in flood zones"], + reconcile_development: ["Granite 4.1 reconcile (dev)", "flagged-projects briefing"], + // live_now + reconcile_live_now: ["Granite 4.1 reconcile (live)", "current-conditions briefing"], +}; + +const SOURCE_LABELS = { + geocode: "NYC DCP Geosearch", + nta_resolve: "NYC DCP Neighborhood Tabulation Areas 2020", + sandy: "NYC OD 5xsi-dfpx — Sandy 2012 inundation", + sandy_nta: "Sandy 2012 inundation, polygon-aggregated", + dep_extreme_2080: "NYC DEP Stormwater — Extreme-2080", + dep_moderate_2050: "NYC DEP Stormwater — Moderate-2050", + dep_moderate_current: "NYC DEP Stormwater — Moderate-current", + dep_extreme_2080_nta: "NYC DEP Extreme-2080, polygon-aggregated", + dep_moderate_2050_nta: "NYC DEP Moderate-2050, polygon-aggregated", + dep_moderate_current_nta: "NYC DEP Moderate-current, polygon-aggregated", + floodnet: "FloodNet NYC", + nyc311: "NYC 311 (erm2-nwe9)", + nyc311_nta: "NYC 311, polygon-aggregated", + microtopo: "USGS 3DEP DEM", + microtopo_nta: "USGS 3DEP DEM, polygon-aggregated", + ida_hwm: "USGS Hurricane Ida 2021 HWMs", + prithvi_water: "Prithvi-EO 2.0 — Hurricane Ida 2021 polygons", + prithvi_live: "Prithvi-EO 2.0 NYC-Pluvial v2 — live Sentinel-2 water segmentation (msradam/Prithvi-EO-2.0-NYC-Pluvial)", + terramind_synthetic: "TerraMind 1.0 base — synthetic LULC (DEM→ESRI Land Cover)", + tm_lulc: "TerraMind-NYC LULC LoRA (msradam/TerraMind-NYC-Adapters)", + tm_buildings: "TerraMind-NYC Buildings LoRA (msradam/TerraMind-NYC-Adapters)", + gliner_comptroller: "GLiNER over Comptroller report", + gliner_dep_2013: "GLiNER over DEP wastewater plan", + gliner_nycha: "GLiNER over NYCHA Lessons Learned", + gliner_mta: "GLiNER over MTA Climate Resilience Roadmap", + gliner_coned: "GLiNER over Con Edison Climate Resilience", + noaa_tides: "NOAA CO-OPS Tides & Currents", + nws_alerts: "NWS Public Alerts", + nws_obs: "NWS Station Observations", + ttm_forecast: "Granite TimeSeries TTM r2 — surge residual nowcast", + ttm_311_forecast: "Granite TimeSeries TTM r2 — per-address 311 weekly forecast", + floodnet_forecast: "Granite TimeSeries TTM r2 — FloodNet sensor recurrence forecast", + ttm_battery: "Granite TTM r2 NYC fine-tune — 96 h Battery surge nowcast (msradam/Granite-TTM-r2-Battery-Surge)", + dob_permits: "NYC DOB Permit Issuance (Socrata ipu4-2q9a)", + live_target: "Riprap planner — live target", + rag_comptroller: 'NYC Comptroller — "Is NYC Ready for Rain?" (2024)', + rag_npcc4: "NPCC4 (2024)", + rag_mta: "MTA Climate Resilience Roadmap", + rag_nycha: "NYCHA Flood Resilience: Lessons Learned", + rag_coned: "Con Edison Climate Resilience Plan", + // Register-specialist family labels — chip lookups for dynamic + // doc_ids (mta_entrance_, nycha_dev_, doe_school_, + // nyc_hospital_) fall through to these via family-prefix match. 
+ mta_entrance: "MTA subway-entrance exposure (Open Data)", + nycha_dev: "NYCHA development exposure (NYC OD phvi-damg)", + doe_school: "NYC DOE school exposure", + nyc_hospital: "NYS DOH hospital exposure (vn5v-hh5r)", +}; + +// Canonical URL per doc_id — clicking a source row opens the underlying +// dataset / API / report in a new tab so users can verify provenance. +const SOURCE_URLS = { + geocode: "https://geosearch.planninglabs.nyc/", + nta_resolve: "https://www.nyc.gov/site/planning/data-maps/open-data/dwn-nynta.page", + sandy: "https://data.cityofnewyork.us/Environment/Sandy-Inundation-Zone/uyj8-7rv5", + sandy_nta: "https://data.cityofnewyork.us/Environment/Sandy-Inundation-Zone/uyj8-7rv5", + dep_extreme_2080: "https://data.cityofnewyork.us/Environment/NYC-Stormwater-Flood-Map-Extreme-Flood-with-Curren/w8eg-8ha6", + dep_moderate_2050: "https://data.cityofnewyork.us/Environment/NYC-Stormwater-Flood-Map-Moderate-Flood-with-Curre/9i7c-xyvv", + dep_moderate_current: "https://data.cityofnewyork.us/Environment/NYC-Stormwater-Flood-Map-Moderate-Flood/5rzh-cyqd", + dep_extreme_2080_nta: "https://data.cityofnewyork.us/Environment/NYC-Stormwater-Flood-Map-Extreme-Flood-with-Curren/w8eg-8ha6", + dep_moderate_2050_nta: "https://data.cityofnewyork.us/Environment/NYC-Stormwater-Flood-Map-Moderate-Flood-with-Curre/9i7c-xyvv", + dep_moderate_current_nta: "https://data.cityofnewyork.us/Environment/NYC-Stormwater-Flood-Map-Moderate-Flood/5rzh-cyqd", + floodnet: "https://www.floodnet.nyc/", + nyc311: "https://data.cityofnewyork.us/Social-Services/311-Service-Requests-from-2010-to-Present/erm2-nwe9", + nyc311_nta: "https://data.cityofnewyork.us/Social-Services/311-Service-Requests-from-2010-to-Present/erm2-nwe9", + microtopo: "https://www.usgs.gov/3d-elevation-program", + microtopo_nta: "https://www.usgs.gov/3d-elevation-program", + ida_hwm: "https://stn.wim.usgs.gov/STNDataPortal/", + prithvi_water: "https://huggingface.co/ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL-Sen1Floods11", + prithvi_live: "https://huggingface.co/msradam/Prithvi-EO-2.0-NYC-Pluvial", + terramind_synthetic: "https://huggingface.co/ibm-esa-geospatial/TerraMind-1.0-base", + tm_lulc: "https://huggingface.co/msradam/TerraMind-NYC-Adapters", + tm_buildings: "https://huggingface.co/msradam/TerraMind-NYC-Adapters", + gliner_comptroller: "https://huggingface.co/urchade/gliner_medium-v2.1", + gliner_dep_2013: "https://huggingface.co/urchade/gliner_medium-v2.1", + gliner_nycha: "https://huggingface.co/urchade/gliner_medium-v2.1", + gliner_mta: "https://huggingface.co/urchade/gliner_medium-v2.1", + gliner_coned: "https://huggingface.co/urchade/gliner_medium-v2.1", + noaa_tides: "https://tidesandcurrents.noaa.gov/", + nws_alerts: "https://www.weather.gov/documentation/services-web-api", + nws_obs: "https://www.weather.gov/documentation/services-web-api", + ttm_forecast: "https://huggingface.co/ibm-granite/granite-timeseries-ttm-r2", + ttm_311_forecast: "https://huggingface.co/ibm-granite/granite-timeseries-ttm-r2", + floodnet_forecast: "https://huggingface.co/ibm-granite/granite-timeseries-ttm-r2", + ttm_battery: "https://huggingface.co/msradam/Granite-TTM-r2-Battery-Surge", + dob_permits: "https://data.cityofnewyork.us/Housing-Development/DOB-Permit-Issuance/ipu4-2q9a", + rag_comptroller: "https://comptroller.nyc.gov/reports/is-new-york-city-ready-for-rain/", + rag_npcc4: "https://nyaspubs.onlinelibrary.wiley.com/toc/17496632/2024/1539/1", + rag_mta: "https://new.mta.info/sustainability/climate-resilience", + rag_nycha: 
"https://www.nyc.gov/site/nycha/about/sustainability.page", + rag_coned: "https://www.coned.com/en/our-energy-future/climate-change-resilience", + mta_entrance: "https://data.ny.gov/Transportation/MTA-Subway-Entrances-and-Exits-2024/i9wp-a4ja", + nycha_dev: "https://data.cityofnewyork.us/Housing-Development/Map-of-NYCHA-Developments/i9rv-hdr5", + doe_school: "https://data.cityofnewyork.us/Education/School-Locations/jfju-ynrr", + nyc_hospital: "https://health.data.ny.gov/Health/Health-Facility-Certification-Information/2g9y-7kqm", +}; + +// Per-source vintage / "as of" — what date the underlying data represents. +// For live sources, the answer is "live; observation timestamps in payload". +// For archival sources, this is the dataset publication or extent date. +const SOURCE_VINTAGES = { + geocode: "live (NYC DCP Geosearch v2)", + nta_resolve: "NYC NTA 2020 boundaries (DCP, Sept 2022 release)", + sandy: "Sandy 2012 inundation extent (NYC OEM survey, dataset published 2013)", + sandy_nta: "Sandy 2012 inundation extent (polygon-aggregated)", + dep_extreme_2080: "NYC DEP Stormwater Flood Map — Extreme + 2080 SLR (2021 release)", + dep_moderate_2050: "NYC DEP Stormwater Flood Map — Moderate + 2050 SLR (2021 release)", + dep_moderate_current: "NYC DEP Stormwater Flood Map — Moderate, current SLR (2021 release)", + dep_extreme_2080_nta: "NYC DEP Extreme-2080 (2021 release; polygon-aggregated)", + dep_moderate_2050_nta: "NYC DEP Moderate-2050 (2021 release; polygon-aggregated)", + dep_moderate_current_nta: "NYC DEP Moderate-current (2021 release; polygon-aggregated)", + floodnet: "live FloodNet sensor stream (per-event timestamps in payload)", + nyc311: "live NYC 311 archive, trailing 5-year window (latest record in payload)", + nyc311_nta: "live NYC 311 archive, trailing 3-year window (polygon-aggregated)", + microtopo: "USGS 3DEP DEM (NYC LiDAR collect, ~2018) + derived HAND/TWI", + microtopo_nta: "USGS 3DEP DEM (NYC ~2018) — polygon-aggregated stats", + ida_hwm: "USGS Short-Term Network Event 312 — Hurricane Ida 2021 high-water marks (Sept 1-2 2021 survey)", + prithvi_water: "Prithvi-EO 2.0 satellite segmentation, scenes 2021-08-25 (pre) & 2021-09-02 (post Ida)", + prithvi_live: "live Sentinel-2 L2A scene from Microsoft Planetary Computer (acquisition timestamp in payload), segmented by the NYC-Pluvial v2 fine-tune of Prithvi-EO 2.0 (test flood IoU 0.5979)", + terramind_synthetic: "synthetic prior — TerraMind 1.0 base generated a plausible categorical land-cover map from the LiDAR terrain at this point (deterministic seed, 10 diffusion steps; class fractions cite-able; not a measurement)", + tm_lulc: "live empirical observation — TerraMind-NYC LULC LoRA (msradam/TerraMind-NYC-Adapters, fine-tuned on NYC chips on AMD MI300X) over the per-query Sentinel-2/1/DEM chip; 5-class macro land cover with class fractions cite-able", + tm_buildings: "live empirical observation — TerraMind-NYC Buildings LoRA (msradam/TerraMind-NYC-Adapters, fine-tuned on NYC chips on AMD MI300X) over the per-query Sentinel-2/1/DEM chip; binary building-footprint mask + connected-component count", + gliner_comptroller: "GLiNER typed extraction over the Comptroller PDF (per-paragraph)", + gliner_dep_2013: "GLiNER typed extraction over the DEP wastewater plan", + gliner_nycha: "GLiNER typed extraction over the NYCHA Lessons Learned PDF", + gliner_mta: "GLiNER typed extraction over the MTA Resilience Roadmap", + gliner_coned: "GLiNER typed extraction over the Con Edison Climate Resilience plan", + noaa_tides: "live NOAA 
CO-OPS, 6-min cadence (observation time in payload)",
+  nws_alerts: "live NWS Public Alerts API (effective/expires in payload)",
+  nws_obs: "live NWS hourly METAR observation (observation time in payload)",
+  ttm_forecast: "live TTM forecast based on trailing 51 h at the closest NOAA gauge to this address (Battery / Kings Pt / Sandy Hook)",
+  ttm_311_forecast: "live TTM forecast based on trailing 52 weeks of NYC 311 flood complaints within 200 m of this address",
+  floodnet_forecast: "live TTM forecast based on the 512-day daily flood-event series at the nearest FloodNet sensor",
+  ttm_battery: "live NYC fine-tuned TTM forecast based on the trailing 1024 hours (~43 days) of hourly surge residual at the Battery; 96 h horizon",
+  dob_permits: "live NYC DOB Permit Issuance, trailing 18-month window (per-permit issuance dates in payload)",
+  rag_comptroller: "NYC Comptroller report 'Is NYC Ready for Rain?' (2024)",
+  rag_npcc4: "NPCC4 — NYC Climate Assessment 4th edition, Annals NYAS vol. 1539 (2024)",
+  rag_mta: "MTA Climate Resilience Roadmap, October 2025 update",
+  rag_nycha: "NYCHA Flood Resilience: Lessons Learned (post-Sandy)",
+  rag_coned: "Con Edison Climate Change Resilience Plan, NY PSC Case 22-E-0222 (2023)",
+  scope_note: "Riprap planner — geographic scope guard (this query)",
+  live_target: "Riprap planner — live target (this query)",
+  mta_entrance: "MTA Open Data subway-entrance geometry (refreshed monthly) joined to Sandy 2012 + DEP scenarios + USGS 3DEP DEM",
+  nycha_dev: "NYC Open Data NYCHA Developments (phvi-damg) joined to Sandy 2012 + DEP scenarios + USGS 3DEP DEM",
+  doe_school: "NYC DOE Locations Points (1,992 schools) joined to Sandy 2012 + DEP scenarios + USGS 3DEP DEM",
+  nyc_hospital: "NYS DOH Health Facility Certification (vn5v-hh5r, NYC counties + fac_desc_short=HOSP) joined to Sandy 2012 + DEP scenarios + USGS 3DEP DEM",
+};
+
+const INTENT_PILL_CLASS = {
+  development_check: "dev",
+  live_now: "live",
+  neighborhood: "nbhd",
+  single_address: "addr",
+};
+
+// ---------------------------------------------------------------------------
+// MAP
+// ---------------------------------------------------------------------------
+
+let map = null;
+let mapInit = false;
+
+function ensureMap() {
+  if (mapInit) return;
+  mapInit = true;
+  map = new maplibregl.Map({
+    container: "map",
+    style: {
+      version: 8,
+      // CARTO Voyager — more editorial typography + softer palette than
+      // Positron, no API key required. Retina (@2x) tiles for crisp type.
+      sources: {
+        basemap: {
+          type: "raster",
+          tiles: [
+            "https://a.basemaps.cartocdn.com/rastertiles/voyager/{z}/{x}/{y}@2x.png",
+            "https://b.basemaps.cartocdn.com/rastertiles/voyager/{z}/{x}/{y}@2x.png",
+            "https://c.basemaps.cartocdn.com/rastertiles/voyager/{z}/{x}/{y}@2x.png",
+            "https://d.basemaps.cartocdn.com/rastertiles/voyager/{z}/{x}/{y}@2x.png",
+          ],
+          tileSize: 256,
+          attribution: "© OpenStreetMap contributors © CARTO",
+        },
+      },
+      layers: [
+        { id: "bg", type: "background", paint: { "background-color": "#f3f5f8" } },
+        { id: "basemap", type: "raster", source: "basemap" },
+      ],
+    },
+    center: [-74.0, 40.72],
+    zoom: 10,
+    attributionControl: { compact: true },
+    // Required for map.getCanvas().toDataURL() to work on the report-export
+    // path. Otherwise the WebGL drawing buffer is cleared after each frame
+    // and snapshots come back blank.
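+    // (preserveDrawingBuffer is a construction-time option in MapLibre GL;
+    // it cannot be toggled on an already-created Map instance.)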
+    preserveDrawingBuffer: true,
+  });
+  map.addControl(new maplibregl.NavigationControl({ visualizePitch: false }), "top-right");
+  map.on("load", initMapSources);
+}
+
+function initMapSources() {
+  // Sandy + DEP overlays (used for nbhd / dev_check)
+  map.addSource("sandy", { type: "geojson", data: empty() });
+  map.addLayer({ id: "sandy-fill", type: "fill", source: "sandy",
+    paint: { "fill-color": "#fc5d52", "fill-opacity": 0.25 } });
+  map.addLayer({ id: "sandy-line", type: "line", source: "sandy",
+    paint: { "line-color": "#fc5d52", "line-width": 0.5, "line-opacity": 0.7 } });
+
+  map.addSource("dep", { type: "geojson", data: empty() });
+  map.addLayer({ id: "dep-fill", type: "fill", source: "dep",
+    paint: {
+      "fill-color": ["match", ["get", "Flooding_Category"],
+        1, "#568adf", 2, "#1642DF", 3, "#031553", "#568adf"],
+      "fill-opacity": 0.32 } });
+
+  // Prithvi-EO 2.0 live water-segmentation polygons. Cyan to differ
+  // visually from Sandy (red) and DEP (blue) — this is *observed today*
+  // water from the latest cloud-free Sentinel-2 scene, not a modeled
+  // scenario. We outline + fill so even sliver geometries (river edges,
+  // canal banks) show up at street zoom.
+  map.addSource("prithvi_live", { type: "geojson", data: empty() });
+  map.addLayer({ id: "prithvi-live-fill", type: "fill", source: "prithvi_live",
+    paint: { "fill-color": "#48c6eb", "fill-opacity": 0.45 } });
+  map.addLayer({ id: "prithvi-live-line", type: "line", source: "prithvi_live",
+    paint: { "line-color": "#1aa3c8", "line-width": 1.2, "line-opacity": 0.85 } });
+
+  // TerraMind synthesised LULC polygons — *synthetic-prior* tier
+  // (4th epistemic class). Per-feature fill_color carried from the
+  // server side so the legend stays in one place. Dashed outline so
+  // it visually reads as "synthesized, not observed".
+  map.addSource("terramind_lulc", { type: "geojson", data: empty() });
+  map.addLayer({ id: "terramind-lulc-fill", type: "fill",
+    source: "terramind_lulc",
+    paint: {
+      "fill-color": ["coalesce", ["get", "fill_color"], "#9ca3af"],
+      "fill-opacity": 0.30,
+    },
+  });
+  map.addLayer({ id: "terramind-lulc-line", type: "line",
+    source: "terramind_lulc",
+    paint: {
+      "line-color": ["coalesce", ["get", "fill_color"], "#9ca3af"],
+      "line-width": 1.0,
+      "line-dasharray": [2, 2],
+      "line-opacity": 0.65,
+    },
+  });
+  map.on("click", "terramind-lulc-fill", (e) => {
+    const f = e.features[0]; const p = f.properties;
+    new maplibregl.Popup().setLngLat(e.lngLat)
+      .setHTML(`<b>TerraMind synthetic land-cover</b><br>` +
+        `Class: ${escapeHtml(p.label || "")} (tentative)<br>` +
+        `Synthesised from LiDAR DEM, not observed.`)
+      .addTo(map);
+  });
+
+  // NTA polygon outline
+  map.addSource("nta", { type: "geojson", data: empty() });
+  map.addLayer({ id: "nta-line", type: "line", source: "nta",
+    paint: { "line-color": "#0b3b6b", "line-width": 2.4, "line-opacity": 0.9 } });
+  map.addLayer({ id: "nta-fill", type: "fill", source: "nta",
+    paint: { "fill-color": "#0b3b6b", "fill-opacity": 0.04 } });
+
+  // DOB permit pins
+  map.addSource("permits", { type: "geojson", data: empty() });
+  map.addLayer({ id: "permits-circles", type: "circle", source: "permits",
+    paint: {
+      "circle-radius": ["case", ["get", "any_flood"], 6, 4],
+      "circle-color": [
+        "case",
+        ["get", "in_sandy"], "#fc5d52",
+        [">=", ["get", "dep_max_class"], 2], "#1642DF",
+        [">", ["get", "dep_max_class"], 0], "#568adf",
+        "#1a8754",
+      ],
+      "circle-stroke-color": "#ffffff",
+      "circle-stroke-width": 1.4,
+      "circle-opacity": 0.95,
+    } });
+  map.on("click", "permits-circles", (e) => {
+    const f = e.features[0]; const p = f.properties;
+    new maplibregl.Popup()
+      .setLngLat(f.geometry.coordinates)
+      .setHTML(
+        `<b>${escapeHtml(p.address || "(unknown)")}</b><br>` +
+        `${p.job_type} · ${p.in_sandy === 'true' ? 'Sandy zone' : 'outside Sandy'}<br>` +
+        `DEP class: ${p.dep_max_class}`)
+      .addTo(map);
+  });
+
+  // Address pin (single_address intent)
+  map.addSource("addr", { type: "geojson", data: empty() });
+  map.addLayer({ id: "addr-pin", type: "circle", source: "addr",
+    paint: { "circle-radius": 10, "circle-color": "#0b3b6b",
+      "circle-stroke-color": "#fff", "circle-stroke-width": 3 } });
+
+  // Search-radius circles (200 m / 600 m / 800 m). Visualizes the
+  // spatial scope each specialist is reading from. Drawn as a thin
+  // line so the underlying point data is readable through them.
+  map.addSource("scope", { type: "geojson", data: empty() });
+  map.addLayer({ id: "scope-line", type: "line", source: "scope",
+    paint: { "line-color": "#0b3b6b", "line-width": 1.0,
+      "line-opacity": 0.55, "line-dasharray": [3, 3] } });
+
+  // NYC 311 flood complaint pins — coloured by descriptor.
+  map.addSource("nyc311_pts", { type: "geojson", data: empty() });
+  map.addLayer({ id: "nyc311-circles", type: "circle", source: "nyc311_pts",
+    paint: {
+      "circle-radius": 4.5,
+      "circle-color": ["match", ["get", "descriptor"],
+        "Sewer Backup (Use Comments) (SA)", "#fc5d52",
+        "Catch Basin Clogged/Flooding (Use Comments) (SC)", "#f59e0b",
+        "Street Flooding (SJ)", "#1642DF",
+        "Manhole Overflow (Use Comments) (SA1)", "#8b5cf6",
+        "#6b7280",
+      ],
+      "circle-stroke-color": "#ffffff",
+      "circle-stroke-width": 1.0,
+      "circle-opacity": 0.85,
+    },
+  });
+  map.on("click", "nyc311-circles", (e) => {
+    const f = e.features[0]; const p = f.properties;
+    new maplibregl.Popup().setLngLat(f.geometry.coordinates)
+      .setHTML(`<b>311 complaint</b><br>${escapeHtml(p.descriptor || "")}<br>` +
+        `${escapeHtml(p.date || "")}<br>${escapeHtml(p.address || "")}`)
+      .addTo(map);
+  });
+
+  // FloodNet sensors — triangles via SDF circle stand-in (cyan,
+  // larger if the sensor has triggered events).
+  map.addSource("floodnet_pts", { type: "geojson", data: empty() });
+  map.addLayer({ id: "floodnet-circles", type: "circle", source: "floodnet_pts",
+    paint: {
+      "circle-radius": 7,
+      "circle-color": "#48c6eb",
+      "circle-stroke-color": "#1aa3c8",
+      "circle-stroke-width": 2.0,
+      "circle-opacity": 0.95,
+    },
+  });
+  map.on("click", "floodnet-circles", (e) => {
+    const f = e.features[0]; const p = f.properties;
+    new maplibregl.Popup().setLngLat(f.geometry.coordinates)
+      .setHTML(`<b>FloodNet sensor</b><br>${escapeHtml(p.name || p.deployment_id || "")}`)
+      .addTo(map);
+  });
+
+  // USGS Hurricane Ida 2021 high-water marks — hot orange, sized by height.
+  map.addSource("ida_hwm_pts", { type: "geojson", data: empty() });
+  map.addLayer({ id: "ida-hwm-circles", type: "circle", source: "ida_hwm_pts",
+    paint: {
+      "circle-radius": ["interpolate", ["linear"],
+        ["coalesce", ["get", "height_above_gnd_ft"], 0],
+        0, 4, 3, 7, 6, 11],
+      "circle-color": "#ea580c",
+      "circle-stroke-color": "#7c2d12",
+      "circle-stroke-width": 1.4,
+      "circle-opacity": 0.92,
+    },
+  });
+  map.on("click", "ida-hwm-circles", (e) => {
+    const f = e.features[0]; const p = f.properties;
+    new maplibregl.Popup().setLngLat(f.geometry.coordinates)
+      .setHTML(`<b>USGS Ida 2021 high-water mark</b><br>` +
+        `${escapeHtml(p.site || "(unnamed)")}<br>` +
+        `Elevation: ${p.elev_ft ?? "?"} ft<br>` +
+        `Height above ground: ${p.height_above_gnd_ft ?? "?"} ft`)
+      .addTo(map);
+  });
+
+  // NOAA tide gauge marker — shows which of the 3 gauges is active.
+  map.addSource("noaa_gauge", { type: "geojson", data: empty() });
+  map.addLayer({ id: "noaa-gauge-marker", type: "circle", source: "noaa_gauge",
+    paint: {
+      "circle-radius": 9,
+      "circle-color": "#0ea5e9",
+      "circle-stroke-color": "#fff",
+      "circle-stroke-width": 2.5,
+    },
+  });
+  map.on("click", "noaa-gauge-marker", (e) => {
+    const f = e.features[0]; const p = f.properties;
+    new maplibregl.Popup().setLngLat(f.geometry.coordinates)
+      .setHTML(`<b>NOAA tide gauge</b><br>${escapeHtml(p.name || "")}<br>` +
+        `Observed water level: ${p.observed_ft ?? "?"} ft MLLW<br>` +
+        `Residual (≈ surge): ${p.residual_ft ?? "?"} ft`)
+      .addTo(map);
+  });
+}
+
+// Build a circle polygon approximating a fixed-radius (meters) buffer
+// around (lat, lon): ~111 km per degree of latitude, scaled by cos(lat)
+// for longitude.
+function metersBuffer(lat, lon, meters, steps = 64) {
+  const dLat = meters / 111_000.0;
+  const dLon = meters / (111_000.0 * Math.cos(lat * Math.PI / 180));
+  const ring = [];
+  for (let i = 0; i <= steps; i++) {
+    const a = (i / steps) * 2 * Math.PI;
+    ring.push([lon + dLon * Math.cos(a), lat + dLat * Math.sin(a)]);
+  }
+  return { type: "Polygon", coordinates: [ring] };
+}
+
+function empty() { return { type: "FeatureCollection", features: [] }; }
+
+function clearMap() {
+  if (!map || !map.getSource) return;
+  for (const id of ["sandy", "dep", "nta", "permits", "addr", "prithvi_live",
+                    "terramind_lulc",
+                    "scope", "nyc311_pts", "floodnet_pts", "ida_hwm_pts",
+                    "noaa_gauge"]) {
+    const s = map.getSource(id);
+    if (s) s.setData(empty());
+  }
+}
+
+async function fillMapForFinal(d) {
+  if (!map || !map.loaded()) {
+    map.once("load", () => fillMapForFinal(d));
+    return;
+  }
+  clearMap();
+  const intent = d.intent;
+  if (intent === "single_address") return fillMapAddress(d);
+  if (intent === "neighborhood") return fillMapNeighborhood(d);
+  if (intent === "development_check") return fillMapDevelopment(d);
+  if (intent === "live_now") return fillMapLive(d);
+}
+
+async function fillMapAddress(d) {
+  const geo = d.geocode;
+  if (!geo || !geo.lat) return;
+  map.flyTo({ center: [geo.lon, geo.lat], zoom: 15.5, duration: 700 });
+  map.getSource("addr").setData({ type: "FeatureCollection",
+    features: [{ type: "Feature",
+      geometry: { type: "Point", coordinates: [geo.lon, geo.lat] }, properties: {} }] });
+  // Fetch Sandy + DEP layers clipped to the address
+  try {
+    const r = await fetch(`/api/layers/sandy?lat=${geo.lat}&lon=${geo.lon}&r=1500`);
+    map.getSource("sandy").setData(await r.json());
+  } catch {}
+  try {
+    const r = await fetch(`/api/layers/dep_extreme_2080?lat=${geo.lat}&lon=${geo.lon}&r=1500`);
+    map.getSource("dep").setData(await r.json());
+  } catch {}
+  // Prithvi-EO live water mask comes inlined in the SSE final event,
+  // not via a separate /api/layers fetch — it's per-query, not corpus.
+  const live = d.prithvi_live;
+  if (live && live.ok && live.polygons_geojson && map.getSource("prithvi_live")) {
+    map.getSource("prithvi_live").setData(live.polygons_geojson);
+  }
+
+  // TerraMind synthesised LULC polygons — same per-query pattern.
+  const tm = d.terramind;
+  if (tm && tm.ok && tm.polygons_geojson && map.getSource("terramind_lulc")) {
+    map.getSource("terramind_lulc").setData(tm.polygons_geojson);
+  }
+
+  // ---- search-radius scope rings (200 m / 600 m / 800 m) ----
+  // Three rings matching the buffers each specialist actually reads:
+  // 200 m for 311, 600 m for FloodNet sensors, 800 m for Ida HWMs.
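+  // Scale check (illustrative numbers): at NYC's latitude,
+  // metersBuffer(40.72, -74.0, 200) spans about ±0.0018° of latitude and
+  // ±0.0024° of longitude, small enough that metersBuffer's flat-earth
+  // approximation is visually exact at briefing zoom levels.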
+ if (map.getSource("scope")) { + map.getSource("scope").setData({ + type: "FeatureCollection", + features: [200, 600, 800].map(r => ({ + type: "Feature", + geometry: metersBuffer(geo.lat, geo.lon, r), + properties: { radius_m: r }, + })), + }); + } + + // ---- NYC 311 flood complaint pins ---- + const c311 = d.nyc311 || {}; + const c311Pts = c311.points || []; + if (map.getSource("nyc311_pts")) { + map.getSource("nyc311_pts").setData({ + type: "FeatureCollection", + features: c311Pts.filter(p => p.lat && p.lon).map(p => ({ + type: "Feature", + geometry: { type: "Point", coordinates: [p.lon, p.lat] }, + properties: { + descriptor: p.descriptor || "", + date: p.date || "", + address: p.address || "", + }, + })), + }); + } + + // ---- FloodNet sensors ---- + const fn = d.floodnet || {}; + const fnSensors = fn.sensors || []; + if (map.getSource("floodnet_pts")) { + map.getSource("floodnet_pts").setData({ + type: "FeatureCollection", + features: fnSensors.filter(s => s.lat && s.lon).map(s => ({ + type: "Feature", + geometry: { type: "Point", coordinates: [s.lon, s.lat] }, + properties: { + name: s.name || s.deployment_id || "", + deployment_id: s.deployment_id || "", + }, + })), + }); + } + + // ---- USGS Ida 2021 HWMs ---- + const hwm = d.ida_hwm || {}; + const hwmPts = hwm.points || []; + if (map.getSource("ida_hwm_pts")) { + map.getSource("ida_hwm_pts").setData({ + type: "FeatureCollection", + features: hwmPts.filter(p => p.lat && p.lon).map(p => ({ + type: "Feature", + geometry: { type: "Point", coordinates: [p.lon, p.lat] }, + properties: { + site: p.site || "", + elev_ft: p.elev_ft, + height_above_gnd_ft: p.height_above_gnd_ft, + }, + })), + }); + } + + // ---- NOAA tide gauge marker ---- + const tides = d.noaa_tides || {}; + if (tides.station_id && tides.station_lat && tides.station_lon && + map.getSource("noaa_gauge")) { + map.getSource("noaa_gauge").setData({ + type: "FeatureCollection", + features: [{ + type: "Feature", + geometry: { type: "Point", + coordinates: [tides.station_lon, tides.station_lat] }, + properties: { + name: tides.station_name || tides.station_id, + observed_ft: tides.observed_ft_mllw, + residual_ft: tides.residual_ft, + }, + }], + }); + } +} + +async function fillMapNeighborhood(d) { + const t = d.target; + if (!t || !t.bbox || !t.nta_code) return; + const [minx, miny, maxx, maxy] = t.bbox; + map.fitBounds([[minx, miny], [maxx, maxy]], { padding: 32, duration: 700 }); + const [r1, r2, r3] = await Promise.all([ + fetch(`/api/layers/nta?code=${t.nta_code}`).then(r => r.json()), + fetch(`/api/layers/sandy_clipped?code=${t.nta_code}`).then(r => r.json()).catch(() => empty()), + fetch(`/api/layers/dep_clipped?code=${t.nta_code}&scenario=dep_extreme_2080`).then(r => r.json()).catch(() => empty()), + ]); + map.getSource("nta").setData(r1); + map.getSource("sandy").setData(r2); + map.getSource("dep").setData(r3); + // Prithvi-EO live water mask (NTA centroid) — same per-query GeoJSON + // as the single_address path; clipped visually to the NTA polygon by + // the basemap zoom. + const live = d.prithvi_live; + if (live && live.ok && live.polygons_geojson && map.getSource("prithvi_live")) { + map.getSource("prithvi_live").setData(live.polygons_geojson); + } + // TerraMind synthesised LULC at NTA centroid. 
+ const tm = d.terramind; + if (tm && tm.ok && tm.polygons_geojson && map.getSource("terramind_lulc")) { + map.getSource("terramind_lulc").setData(tm.polygons_geojson); + } +} + +async function fillMapDevelopment(d) { + await fillMapNeighborhood(d); // same NTA + Sandy + DEP overlays + const pins = ((d.dob_summary || {}).all_pins) || []; + const fc = { + type: "FeatureCollection", + features: pins.filter(p => p.lat && p.lon).map(p => ({ + type: "Feature", + geometry: { type: "Point", coordinates: [p.lon, p.lat] }, + properties: { + address: p.address, job_type: p.job_type, + in_sandy: !!p.in_sandy, any_flood: !!p.any_flood, + dep_max_class: p.dep_max_class || 0, + }, + })), + }; + map.getSource("permits").setData(fc); +} + +function fillMapLive(d) { + // NYC overview with the 3 NOAA gauges + map.flyTo({ center: [-74.0, 40.7], zoom: 10, duration: 700 }); +} + +// Fire as each FSM step completes, so the map updates progressively +// instead of waiting for the `final` event. Each branch is idempotent — +// it's safe if `final` later overwrites with the same data. +async function incrementallyFillMap(step) { + if (!map || !map.loaded()) { + map.once("load", () => incrementallyFillMap(step)); + return; + } + const r = step.result || {}; + // Address mode — geocode just resolved + if (step.step === "geocode" && r.lat != null && r.lon != null) { + map.flyTo({ center: [r.lon, r.lat], zoom: 15.5, duration: 700 }); + map.getSource("addr").setData({ type: "FeatureCollection", + features: [{ type: "Feature", + geometry: { type: "Point", coordinates: [r.lon, r.lat] }, properties: {} }] }); + Promise.all([ + fetch(`/api/layers/sandy?lat=${r.lat}&lon=${r.lon}&r=1500`).then(x => x.json()).catch(() => empty()), + fetch(`/api/layers/dep_extreme_2080?lat=${r.lat}&lon=${r.lon}&r=1500`).then(x => x.json()).catch(() => empty()), + ]).then(([s, d]) => { + map.getSource("sandy").setData(s); + map.getSource("dep").setData(d); + }); + return; + } + // Neighborhood / dev_check — NTA polygon resolved + if (step.step === "nta_resolve" && r.nta_code && r.bbox) { + const [minx, miny, maxx, maxy] = r.bbox; + map.fitBounds([[minx, miny], [maxx, maxy]], { padding: 32, duration: 700 }); + Promise.all([ + fetch(`/api/layers/nta?code=${r.nta_code}`).then(x => x.json()).catch(() => empty()), + fetch(`/api/layers/sandy_clipped?code=${r.nta_code}`).then(x => x.json()).catch(() => empty()), + fetch(`/api/layers/dep_clipped?code=${r.nta_code}&scenario=dep_extreme_2080`).then(x => x.json()).catch(() => empty()), + ]).then(([n, s, d]) => { + map.getSource("nta").setData(n); + map.getSource("sandy").setData(s); + map.getSource("dep").setData(d); + }); + return; + } + // Dev_check — DOB permits arrived; pin them now + if (step.step === "dob_permits_nta" && Array.isArray(r.all_pins)) { + const fc = { type: "FeatureCollection", + features: r.all_pins.filter(p => p.lat && p.lon).map(p => ({ + type: "Feature", + geometry: { type: "Point", coordinates: [p.lon, p.lat] }, + properties: { + address: p.address, job_type: p.job_type, + in_sandy: !!p.in_sandy, any_flood: !!p.any_flood, + dep_max_class: p.dep_max_class || 0, + }, + })) }; + map.getSource("permits").setData(fc); + return; + } +} + +// --------------------------------------------------------------------------- +// REPORT (paragraph) RENDERING +// --------------------------------------------------------------------------- + +function escapeHtml(s) { + return String(s ?? 
"").replace(/&/g, "&").replace(//g, ">"); +} + +let CITE_INDEX = {}; +// Resolve a doc_id to its source-label family. Register specialists emit +// per-asset doc_ids like `mta_entrance_54` / `nycha_dev_004` — for those +// we strip the trailing `_` and look up the family key. +const _FAMILY_PREFIXES = ["mta_entrance", "nycha_dev", "doe_school", "nyc_hospital"]; +function _docIdFamily(norm) { + for (const fam of _FAMILY_PREFIXES) { + if (norm.startsWith(fam + "_")) return fam; + } + return null; +} +function _resolveSourceLabel(norm) { + if (SOURCE_LABELS[norm]) return SOURCE_LABELS[norm]; + const fam = _docIdFamily(norm); + return fam ? SOURCE_LABELS[fam] : norm; +} +function rewriteCitations(html) { + return html.replace(/\[([a-z0-9_]+)\]/gi, (_, id) => { + const norm = id.toLowerCase(); + if (CITE_INDEX[norm] == null) CITE_INDEX[norm] = Object.keys(CITE_INDEX).length + 1; + const n = CITE_INDEX[norm]; + return `${n}`; + }); +} + +// Sources footer is a Lit web component () — driven +// by the citeIndex signal in /static/components/signals.js. We feed +// it the labels/urls/vintages once at boot and update the signal as +// the briefing markdown is rendered. +async function renderSources() { + const el = document.getElementById("sourcesFooter"); + if (!el) return; + // Module is loaded async; wait for define() then push fresh data. + await customElements.whenDefined("r-sources-footer"); + el.labels = SOURCE_LABELS; + el.urls = SOURCE_URLS; + el.vintages = SOURCE_VINTAGES; + // Push the citation index into the shared signal — the component + // re-renders reactively. + const { citeIndex } = await import("/static/components/signals.js"); + citeIndex.set({ ...CITE_INDEX }); +} + +function renderMarkdown(text) { + // Block recognizer: + // `**Header.**` (own line) →

+ // lines starting `- ` or `* ` → bullet items collected into
    + // anything else →

    + // Inline `**foo**` → + const lines = text.split("\n"); + const out = []; + let para = []; let bullets = []; + const flushPara = () => { + if (!para.length) return; + const safe = escapeHtml(para.join(" ").trim()).replace(/\*\*([^*]+)\*\*/g, "$1"); + if (safe) out.push(`

    ${safe}

    `); + para = []; + }; + const flushBullets = () => { + if (!bullets.length) return; + const items = bullets.map(b => { + const safe = escapeHtml(b.trim()).replace(/\*\*([^*]+)\*\*/g, "$1"); + return `
  • ${safe}
  • `; + }).join(""); + out.push(`
      ${items}
    `); + bullets = []; + }; + // Granite sometimes runs all bullets onto one line separated by " - "; + // pre-split those so each becomes its own bullet. + const expanded = []; + for (const line of lines) { + if (line.trim().startsWith("- ") && line.includes(" - ", 2)) { + // split into bullets + const parts = line.split(/(?:^|(?<=\.\s))\s*-\s+/g).filter(p => p.trim()); + for (const p of parts) expanded.push("- " + p.trim()); + } else { + expanded.push(line); + } + } + for (const line of expanded) { + const m = line.match(/^\s*\*\*([A-Z][A-Za-z\s/]+)\.\*\*\s*$/); + if (m) { + flushPara(); flushBullets(); + out.push(`

    ${escapeHtml(m[1])}

    `); + continue; + } + if (/^\s*[-*]\s+/.test(line)) { + flushPara(); + bullets.push(line.replace(/^\s*[-*]\s+/, "")); + } else { + flushBullets(); + para.push(line); + } + } + flushPara(); flushBullets(); + return out.join(""); +} + +// Briefing is now the Lit web component. It owns markdown +// rendering, citation chip binding, and pushing CITE_INDEX into the +// shared signal — agent.js just feeds it `.text` + `.sourceLabels`. +async function setBriefingText(text) { + const el = document.getElementById("paragraph"); + if (!el) return; + await customElements.whenDefined("r-briefing"); + el.sourceLabels = SOURCE_LABELS; + el.text = text || ""; +} +function renderParagraph(text) { setBriefingText(text); } + +// --------------------------------------------------------------------------- +// FACTS PANEL — intent-specific quick-look stats below the map +// --------------------------------------------------------------------------- + +function renderFacts(d) { + const intent = d.intent; + const panel = $("#factsPanel"); + const body = $("#factsBody"); + panel.style.display = ""; + if (intent === "neighborhood") renderNbhdFacts(d, body); + else if (intent === "development_check") renderDevFacts(d, body); + else if (intent === "live_now") renderLiveFacts(d, body); + else if (intent === "single_address") renderAddressFacts(d, body); +} + +function renderNbhdFacts(d, body) { + $("#factsTitle").textContent = `Findings — ${d.target?.nta_name || ""}`; + const s = d.sandy_nta || {}; const dep = d.dep_nta || {}; + const m = d.microtopo_nta || {}; const c = d.nyc311_nta || {}; + const sandyPct = s.fraction != null ? (s.fraction * 100).toFixed(1) + "%" : "—"; + const dep80 = (dep.dep_extreme_2080 || {}).fraction_any; + const dep50 = (dep.dep_moderate_2050 || {}).fraction_any; + body.innerHTML = ` +
+    <div class="fact-hero">
+      <span class="fact-num">${sandyPct}</span>
+      <span class="fact-label">of the neighborhood is inside the 2012 Sandy Inundation Zone</span>
+    </div>
+    <div class="fact-grid">
+      <div class="fact"><span class="k">DEP Extreme 2080</span>
+        <span class="v">${dep80!=null ? (dep80*100).toFixed(1)+"%" : "—"}</span></div>
+      <div class="fact"><span class="k">DEP Moderate 2050</span>
+        <span class="v">${dep50!=null ? (dep50*100).toFixed(1)+"%" : "—"}</span></div>
+      <div class="fact"><span class="k">311 (3 yr)</span>
+        <span class="v">${c.n ?? "—"} flood complaints</span></div>
+      <div class="fact"><span class="k">HAND median</span>
+        <span class="v">${m.hand_median_m != null ? m.hand_median_m+" m" : "—"}</span></div>
+      <div class="fact"><span class="k">HAND &lt; 1 m fraction</span>
+        <span class="v">${m.frac_hand_lt1 != null ? (m.frac_hand_lt1*100).toFixed(0)+"%" : "—"}</span></div>
+      <div class="fact"><span class="k">TWI median</span>
+        <span class="v">${m.twi_median ?? "—"}</span></div>
+    </div>
+  `;
+}
+
+function renderDevFacts(d, body) {
+  $("#factsTitle").textContent = `Active construction — ${d.target?.nta_name || ""}`;
+  const ds = d.dob_summary || {};
+  body.innerHTML = `
+    <div class="fact-hero">
+      <span class="fact-num">${ds.n_in_sandy ?? 0} / ${ds.n_total ?? 0}</span>
+      <span class="fact-label">active projects inside the Sandy zone</span>
+    </div>
+    <div class="fact-grid">
+      <div class="fact"><span class="k">Total active</span>
+        <span class="v">${ds.n_total ?? 0}</span></div>
+      <div class="fact"><span class="k">In any DEP scenario</span>
+        <span class="v">${ds.n_in_dep_any ?? 0}</span></div>
+      <div class="fact"><span class="k">In severe DEP (≥1 ft)</span>
+        <span class="v">${ds.n_in_dep_severe ?? 0}</span></div>
+      <div class="fact"><span class="k">By job type</span>
+        <span class="v">${Object.entries(ds.by_job_type || {}).map(([k,v]) => `${v} ${k}`).join(", ")}</span></div>
+    </div>
+  `;
+}
+
+function renderLiveFacts(d, body) {
+  $("#factsTitle").textContent = `Live conditions — ${d.place || "NYC"}`;
+  const t = d.noaa_tides || {}; const a = d.nws_alerts || {}; const o = d.nws_obs || {};
+  const ttm = d.ttm_forecast || {};
+  const r = t.residual_ft;
+  body.innerHTML = `
+    <div class="fact-hero">
+      <span class="fact-num">${a.n_active ?? 0} alerts</span>
+      <span class="fact-label">active flood-relevant NWS alerts at this point</span>
+    </div>
+    <div class="fact-grid">
+      <div class="fact"><span class="k">Tide gauge</span>
+        <span class="v">${t.station_name || "—"}</span></div>
+      <div class="fact"><span class="k">Observed</span>
+        <span class="v">${t.observed_ft_mllw != null ? t.observed_ft_mllw+" ft MLLW" : "—"}</span></div>
+      <div class="fact"><span class="k">Residual</span>
+        <span class="v">${r != null ? (r >= 0 ? "+" : "")+r+" ft" : "—"}</span></div>
+      <div class="fact"><span class="k">Nearest ASOS</span>
+        <span class="v">${o.station_id || "—"}</span></div>
+      <div class="fact"><span class="k">Precip 1h</span>
+        <span class="v">${o.precip_last_hour_mm != null ? o.precip_last_hour_mm+" mm" : "—"}</span></div>
+      <div class="fact"><span class="k">TTM peak (next 9.6h)</span>
+        <span class="v">${ttm.forecast_peak_ft != null ? ttm.forecast_peak_ft+" ft" : "—"}</span></div>
+    </div>
+  `;
+}
+
+function renderAddressFacts(d, body) {
+  $("#factsTitle").textContent = "Findings";
+  const geo = d.geocode || {};
+  const dep = d.dep || {}; const e80 = (dep.dep_extreme_2080 || {});
+  const m = d.microtopo || {};
+  body.innerHTML = `
+    <div class="fact-hero">
+      <span class="fact-label">${geo.address || "—"}</span>
+    </div>
+    <div class="fact-grid">
+      <div class="fact"><span class="k">Sandy zone</span>
+        <span class="v">${d.sandy ? "INSIDE" : "outside"}</span></div>
+      <div class="fact"><span class="k">DEP Extreme 2080</span>
+        <span class="v">${e80.depth_label || "—"}</span></div>
+      <div class="fact"><span class="k">HAND</span>
+        <span class="v">${m.hand_m != null ? m.hand_m+" m" : "—"}</span></div>
+      <div class="fact"><span class="k">TWI</span>
+        <span class="v">${m.twi ?? "—"}</span></div>
+      <div class="fact"><span class="k">Elev pct (200m)</span>
+        <span class="v">${m.rel_elev_pct_200m ?? "—"}</span></div>
+      <div class="fact"><span class="k">311 (5y, 200m)</span>
+        <span class="v">${(d.nyc311 || {}).n ?? "—"}</span></div>
+    </div>
+  `;
+}
+
+// ---------------------------------------------------------------------------
+// TRACE PANEL
+// ---------------------------------------------------------------------------
+
+// Trace list is a Lit web component (<r-trace>); pushTraceStep delegates
+// once the component is registered. STEP_LABELS is set on the element
+// at boot.
+async function pushTraceStep(step) {
+  const el = document.getElementById("steps");
+  if (!el) return;
+  await customElements.whenDefined("r-trace");
+  if (!el.stepLabels || !Object.keys(el.stepLabels).length) {
+    el.stepLabels = STEP_LABELS;
+  }
+  el.pushStep(step);
+}
+
+async function clearTrace() {
+  const el = document.getElementById("steps");
+  if (el) {
+    await customElements.whenDefined("r-trace");
+    el.clear();
+  }
+  $("#traceMeta").textContent = "";
+}
+
+// --------------------------------------------------------------------------
+// Loading-state and chrome helpers
+// --------------------------------------------------------------------------
+
+function setMapLoading(text) {
+  const el = $("#mapLoading");
+  if (!el) return;
+  if (text) {
+    el.style.display = "";
+    $("#mapLoadingText").textContent = text;
+  } else {
+    el.style.display = "none";
+  }
+}
+
+function setLegend(intent) {
+  const el = $("#mapLegend");
+  if (!el) return;
+  // Reusable legend rows shared across intents.
+  const empirical = `
+    <div class="row"><span class="sw sandy"></span>Sandy 2012 extent</div>
+    <div class="row"><span class="sw dep"></span>DEP Extreme-2080</div>`;
+  const points = `
+    <div class="row"><span class="sw c311-backup"></span>311 — sewer backup</div>
+    <div class="row"><span class="sw c311-basin"></span>311 — catch basin</div>
+    <div class="row"><span class="sw c311-street"></span>311 — street flooding</div>
+    <div class="row"><span class="sw floodnet"></span>FloodNet sensor</div>
+    <div class="row"><span class="sw hwm"></span>Ida 2021 high-water mark</div>
+    <div class="row"><span class="sw gauge"></span>NOAA tide gauge</div>`;
+  // Synthetic-prior tier — distinct visual idiom (dashed) so users
+  // immediately read it as "generated, not observed".
+  const synthetic = `
+    <div class="row hdr">Synthetic priors (not observed)</div>
+    <div class="row"><span class="sw prithvi"></span>Prithvi-EO 2.0 — live water mask</div>
+    <div class="row"><span class="sw terramind"></span>TerraMind — synthetic LULC (DEM→ESRI Land Cover, dashed = generated)</div>
+  `;
+
+  if (intent === "development_check") {
+    el.innerHTML = `
+      <div class="row hdr">Active permits</div>
+      <div class="row"><span class="sw permit-sandy"></span>Inside Sandy zone</div>
+      <div class="row"><span class="sw permit-deep"></span>DEP deep band (≥1 ft)</div>
+      <div class="row"><span class="sw permit-nuisance"></span>DEP nuisance band</div>
+      <div class="row"><span class="sw permit-clear"></span>No flood layer</div>
+      ${empirical}
+      ${synthetic}`;
+    el.style.display = "";
+  } else if (intent === "neighborhood") {
+    el.innerHTML = `${empirical}
+      <div class="row"><span class="sw nta"></span>NTA boundary</div>
+      ${synthetic}`;
+    el.style.display = "";
+  } else if (intent === "single_address") {
+    el.innerHTML = `
+      <div class="row"><span class="sw addr"></span>Address</div>
+      ${empirical}${points}${synthetic}`;
+    el.style.display = "";
+  } else {
+    el.style.display = "none";
+  }
+}
+
+// Mirrors app/score.py.composite() — see ARCHITECTURE.md / METHODOLOGY.md.
+// Used only for the single_address intent badge; neighborhood and
+// development_check have their own headline stats in the facts panel.
+const REG_W = { fema_1pct: 1.0, fema_02pct: 0.5,
+  dep_moderate_2050: 0.75, dep_extreme_2080: 0.50, dep_tidal_2050: 0.75 };
+const HYD_W = { hand_band: 1.0, twi_quartile: 0.5,
+  elev_pct_200m_inv: 0.5, elev_pct_750m_inv: 0.5, basin_relief_band: 0.25 };
+const EMP_W = { sandy: 1.0, ida_hwm_within_100m: 1.0, ida_hwm_within_800m: 0.5,
+  prithvi_polygon: 0.75, complaints_band: 0.75, floodnet_trigger: 0.75 };
+const handBand = h => h == null ? 0 : (h < 1 ? 1 : h < 3 ? 0.66 : h < 10 ? 0.33 : 0);
+const pctInvBand = p => p == null ? 0 : (p < 10 ? 1 : p < 25 ? 0.66 : p < 50 ? 0.33 : 0);
+const twiBand = t => t == null ? 0 : (t >= 12 ? 1 : t >= 10 ? 0.66 : t >= 8 ? 0.33 : 0);
+const reliefBand = r => r == null ? 0 : (r >= 8 ? 1 : r >= 4 ? 0.66 : r >= 2 ? 0.33 : 0);
+const complBand = n => !n ? 0 : (n >= 10 ? 1 : n >= 3 ? 0.66 : 0.33);
+const sumW = w => Object.values(w).reduce((a, b) => a + b, 0);
+
+function computeComposite(ev) {
+  const dep = ev.dep || {}, mt = ev.microtopo || {}, ida = ev.ida_hwm || {}, pw = ev.prithvi_water || {};
+  const s = {
+    fema_1pct: false, fema_02pct: false,
+    dep_moderate_2050: (dep.dep_moderate_2050?.depth_class || 0) > 0,
+    dep_extreme_2080: (dep.dep_extreme_2080?.depth_class || 0) > 0,
+    dep_tidal_2050: false,
+    hand_m: mt.hand_m, twi: mt.twi,
+    rel_elev_pct_200m: mt.rel_elev_pct_200m,
+    rel_elev_pct_750m: mt.rel_elev_pct_750m,
+    basin_relief_m: mt.basin_relief_m,
+    sandy: !!ev.sandy,
+    ida_hwm_within_100m: (ida.nearest_dist_m != null && ida.nearest_dist_m < 100),
+    ida_hwm_within_800m: (ida.n_within_radius || 0) > 0,
+    prithvi_polygon: !!pw.inside_water_polygon,
+    complaints_count: ev.nyc311?.n || 0,
+    floodnet_trigger: (ev.floodnet?.n_flood_events_3y || 0) > 0,
+  };
+  let regRaw = 0; for (const [k, w] of Object.entries(REG_W)) regRaw += s[k] ? w : 0;
+  const reg = regRaw / sumW(REG_W);
+  const hb = { hand_band: handBand(s.hand_m), twi_quartile: twiBand(s.twi),
+    elev_pct_200m_inv: pctInvBand(s.rel_elev_pct_200m),
+    elev_pct_750m_inv: pctInvBand(s.rel_elev_pct_750m),
+    basin_relief_band: reliefBand(s.basin_relief_m) };
+  let hydRaw = 0; for (const [k, w] of Object.entries(HYD_W)) hydRaw += w * hb[k];
+  const hyd = hydRaw / sumW(HYD_W);
+  const ev2 = { sandy: s.sandy ? 1 : 0,
+    ida_hwm_within_100m: s.ida_hwm_within_100m ? 1 : 0,
+    ida_hwm_within_800m: s.ida_hwm_within_800m ? 1 : 0,
+    prithvi_polygon: s.prithvi_polygon ? 1 : 0,
+    complaints_band: complBand(s.complaints_count),
+    floodnet_trigger: s.floodnet_trigger ? 1 : 0 };
+  let empRaw = 0; for (const [k, w] of Object.entries(EMP_W)) empRaw += w * ev2[k];
+  const emp = empRaw / sumW(EMP_W);
+  const composite = reg + hyd + emp;
+  let tier = 0;
+  if (composite >= 1.50) tier = 1;
+  else if (composite >= 1.00) tier = 2;
+  else if (composite >= 0.50) tier = 3;
+  else if (composite >= 0.01) tier = 4;
+  const floorApplied = !!(s.sandy || s.ida_hwm_within_100m);
+  if (floorApplied && (tier === 0 || tier > 2)) tier = 2;
+  return { tier, composite, floorApplied,
+    sub: { regulatory: reg, hydrological: hyd, empirical: emp } };
+}
+
+function tierMeta(tier) {
+  if (tier === 1) return { tier, label: "High exposure",
+    help: "Multiple sub-indices saturated. Not a damage probability."
+  };
+  if (tier === 2) return { tier, label: "Elevated exposure",
+    help: "At least one sub-index near saturation. Not a damage probability." };
+  if (tier === 3) return { tier, label: "Moderate exposure",
+    help: "Partial signals across categories. Not a damage probability." };
+  if (tier === 4) return { tier, label: "Limited exposure",
+    help: "A single contextual signal." };
+  return { tier: 0, label: "No flagged exposure",
+    help: "No positive flood signal across the assessed sources." };
+}
+
+function renderBriefHead(d) {
+  const intent = d.intent;
+  const place = (d.target && d.target.nta_name)
+    || (d.geocode && d.geocode.address)
+    || d.place || "—";
+  const meta = [];
+  const eyebrowMap = {
+    single_address: "Flood-exposure briefing — address",
+    neighborhood: "Flood-exposure briefing — neighborhood",
+    development_check: "Active development × flood exposure",
+    live_now: "Current conditions — NYC",
+  };
+  $("#briefEyebrow").textContent = eyebrowMap[intent] || "Briefing";
+  $("#briefTitle").innerHTML = escapeHtml(place);
+
+  // For single_address intent, append the tier badge inline with the title
+  // — same idiom as the legacy /single page.
+  if (intent === "single_address") {
+    const c = computeComposite(d);
+    const m = tierMeta(c.tier);
+    const titleEl = $("#briefTitle");
+    const floor = c.floorApplied ? ' <span class="chip floor">empirical floor</span>' : "";
+    titleEl.innerHTML += ` <span class="tier-badge tier-${m.tier}" title="${escapeHtml(m.help)}">` +
+      `Tier ${m.tier} · ${escapeHtml(m.label)}</span>${floor}`;
+  }
+
+  // Mellea compliance badge — present iff strict mode ran and produced
+  // metadata. Color reflects pass ratio: green for full, amber partial,
+  // red none.
+  if (d.mellea) {
+    const m = d.mellea;
+    const passed = (m.requirements_passed || []).length;
+    const total = m.requirements_total || 0;
+    const cls = passed === total ? "full"
+              : passed > 0 ? "partial"
+              : "none";
+    const tip = `Mellea (IBM Research) ran ${m.n_attempts} attempt${m.n_attempts === 1 ? "" : "s"}` +
+      ` (${m.rerolls} reroll${m.rerolls === 1 ? "" : "s"}). ` +
+      `Requirements passed: ${(m.requirements_passed || []).join(", ") || "none"}. ` +
+      (m.requirements_failed?.length
+        ? `Failed: ${m.requirements_failed.join(", ")}.` : "");
+    $("#briefTitle").innerHTML +=
+      ` <span class="chip mellea ${cls}" title="${escapeHtml(tip)}">` +
+      `Mellea ${passed}/${total}` +
+      (m.rerolls > 0 ? ` · ${m.rerolls} reroll${m.rerolls === 1 ? "" : "s"}` : "") +
+      `</span>`;
+  }
+  if (intent === "single_address" && d.geocode) {
+    if (d.geocode.borough) meta.push(`borough ${escapeHtml(d.geocode.borough)}`);
+    if (d.geocode.bbl) meta.push(`bbl ${escapeHtml(d.geocode.bbl)}`);
+  } else if (d.target && d.target.borough) {
+    meta.push(`borough ${escapeHtml(d.target.borough)}`);
+    if (d.target.nta_code) meta.push(`nta ${escapeHtml(d.target.nta_code)}`);
+  }
+  if (d.total_s != null) meta.push(`runtime ${d.total_s}s`);
+  meta.push(`assessed ${new Date().toISOString().slice(0,16).replace("T"," ")}`);
+  $("#briefMeta").innerHTML = meta.join(" · ");
+}
+
+// ---------------------------------------------------------------------------
+// PLANNER ROW
+// ---------------------------------------------------------------------------
+
+function renderPlan(p) {
+  const pillCls = INTENT_PILL_CLASS[p.intent] || "";
+  $("#plannerRow").innerHTML = `
+    <span class="plan-k">Planner</span>
+    <span class="pill ${pillCls}">${escapeHtml(p.intent)}</span>
+    <span class="plan-k">Targets</span>
+    <span class="plan-v">${(p.targets || []).map(t => escapeHtml(t.type) + ":" + escapeHtml(t.text)).join(", ") || "(none)"}</span>
+    <span class="plan-k">Specialists</span>
+    <span class="plan-v">${(p.specialists || []).join(", ")}</span>
+    <span class="plan-rationale">"${escapeHtml(p.rationale || "")}"</span>
+  `;
+}
+
+// ---------------------------------------------------------------------------
+// SSE driver
+// ---------------------------------------------------------------------------
+
+let currentEs = null;
+// Buffers for the report-export feature — capture the full plan, trace,
+// and final result during streaming so the report page can render the
+// complete evidence package without re-running the agent.
+let LAST_RESULT = null;
+let LAST_TRACE = [];
+let LAST_PLAN = null;
+let LAST_PLAN_OBJ = null;
+let TRACE_BUF = [];
+
+function ask(q) {
+  ensureMap();
+  clearTrace(); clearMap();
+  $("#plannerRow").innerHTML = "";
+  setBriefingText("");
+  $("#paragraph").classList.remove("streaming");
+  const banner = $("#melleaBanner");
+  if (banner) { banner.style.display = "none"; banner.innerHTML = ""; }
+  $("#reportPanel").style.display = "none";
+  $("#factsPanel").style.display = "none";
+  $("#reportSkel").style.display = "";
+  $("#traceSkel").style.display = "";
+  $("#mapLegend").style.display = "none";
+  setMapLoading("Granite is planning the query…");
+  $("#goBtn").disabled = true;
+  $("#traceMeta").textContent = "…";
+
+  if (currentEs) currentEs.close();
+  const es = new EventSource("/api/agent/stream?q=" + encodeURIComponent(q));
+  currentEs = es;
+  const t0 = Date.now();
+  let streamBuf = "";
+  let streamTimer = null;
+  let planStreamBuf = "";
+  let planStreamTimer = null;
+  const ensurePlannerStream = () => {
+    let el = $("#plannerRow .planner-streaming");
+    if (!el) {
+      $("#plannerRow").innerHTML = `
+        <div class="planner-streaming"></div>`;
+      el = $("#plannerRow .planner-streaming");
+    }
+    return el;
+  };
+  const repaintPlanner = () => {
+    const el = $("#plannerRow .planner-streaming");
+    if (el) el.textContent = planStreamBuf;
+  };
+  const schedulePlannerRepaint = () => {
+    if (planStreamTimer) return;
+    planStreamTimer = setTimeout(() => { planStreamTimer = null; repaintPlanner(); }, 60);
+  };
+  // Re-render the partial markdown on every token, but at most every 80 ms
+  // so the browser isn't murdered by a token-stream that arrives in bursts.
+  // Build the Sources footer alongside so it grows as new doc_ids appear.
+  // Briefing component owns citation indexing + chip binding via shared
+  // signals; we just feed it the latest text. Sources footer reacts to
+  // the citeIndex signal that updates each render.
+  const repaint = () => {
+    setBriefingText(streamBuf);
+    renderSources();
+  };
+  const scheduleRepaint = () => {
+    if (streamTimer) return;
+    streamTimer = setTimeout(() => { streamTimer = null; repaint(); }, 80);
+  };
+
+  es.addEventListener("plan_token", (e) => {
+    ensurePlannerStream();
+    const d = JSON.parse(e.data);
+    planStreamBuf += d.delta || "";
+    schedulePlannerRepaint();
+  });
+  es.addEventListener("plan", (e) => {
+    if (planStreamTimer) { clearTimeout(planStreamTimer); planStreamTimer = null; }
+    const planObj = JSON.parse(e.data);
+    LAST_PLAN_OBJ = planObj;
+    renderPlan(planObj);
+    setLegend(planObj.intent);
+    setMapLoading(planObj.intent === "live_now" ? null : "Resolving location…");
+    $("#traceSkel").style.display = "none";
+    TRACE_BUF = [];
+    $("#reportBtn").classList.remove("ready");
+  });
+  es.addEventListener("step", (e) => {
+    const step = JSON.parse(e.data);
+    TRACE_BUF.push(step);
+    incrementallyFillMap(step);
+    if (step.step === "geocode" || step.step === "nta_resolve") setMapLoading(null);
+    pushTraceStep(step);
+  });
+
+  // Stones envelope — `stone_start` and `stone_done` events bracket
+  // the contiguous step events of each Stone group. The current
+  // `<r-trace>` Lit build doesn't yet render parent/child rows;
+  // we accumulate Stone markers in TRACE_BUF for the auditable report,
+  // and surface a lightweight badge on the trace component so users
+  // can see Cornerstone / Keystone / Touchstone / Lodestone / Capstone
+  // lighting up sequentially. The full collapsible parent-row UI
+  // lands once the trace component is rebuilt against this event
+  // vocabulary.
+  es.addEventListener("stone_start", (e) => {
+    const stone = JSON.parse(e.data);
+    TRACE_BUF.push({ _stone: "start", ...stone });
+    const trace = $("#trace");
+    if (trace && typeof trace.markStoneStart === "function") {
+      trace.markStoneStart(stone);
+    }
+  });
+  es.addEventListener("stone_done", (e) => {
+    const stone = JSON.parse(e.data);
+    TRACE_BUF.push({ _stone: "done", ...stone });
+    const trace = $("#trace");
+    if (trace && typeof trace.markStoneDone === "function") {
+      trace.markStoneDone(stone);
+    }
+  });
+  let currentAttempt = 0;
+  es.addEventListener("token", (e) => {
+    const d = JSON.parse(e.data);
+    if (!streamBuf || (d.attempt != null && d.attempt !== currentAttempt)) {
+      // First token of a (possibly new) attempt → reveal the panel; reset
+      // the buffer if Mellea moved on to a reroll.
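+      // Token payloads are small JSON frames, e.g. (illustrative values):
+      //   data: {"delta": " Sandy", "attempt": 1}
+      // `attempt` only moves forward, so comparing it against
+      // currentAttempt is what discards a rejected draft instead of
+      // concatenating the reroll onto it.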
+      if (d.attempt != null && d.attempt !== currentAttempt) {
+        currentAttempt = d.attempt;
+        streamBuf = "";
+      }
+      $("#reportSkel").style.display = "none";
+      $("#reportPanel").style.display = "";
+      $("#paragraph").classList.add("streaming");
+    }
+    streamBuf += d.delta || "";
+    scheduleRepaint();
+  });
+  // Mellea per-attempt outcome — render a small banner above the briefing
+  // when a reroll is about to start so the user knows the model is
+  // self-correcting (and what failed).
+  es.addEventListener("mellea_attempt", (e) => {
+    const d = JSON.parse(e.data);
+    const banner = $("#melleaBanner");
+    if (!banner) return;
+    if (d.failed && d.failed.length) {
+      banner.className = "mellea-banner reroll";
+      banner.innerHTML = `↻ Mellea reroll — attempt ${(d.attempt|0)+1} failed: ${d.failed.join(", ")}. Re-drafting…`;
+      banner.style.display = "";
+    } else {
+      banner.className = "mellea-banner pass";
+      banner.innerHTML = `✓ Mellea — all 4 grounding requirements satisfied`;
+      banner.style.display = "";
+    }
+  });
+  es.addEventListener("final", (e) => {
+    const d = JSON.parse(e.data);
+    const dt = ((Date.now() - t0) / 1000).toFixed(1);
+    $("#traceMeta").textContent = `${dt}s`;
+    setMapLoading(null);
+    $("#reportSkel").style.display = "none";
+    $("#paragraph").classList.remove("streaming");
+    if (d.paragraph) {
+      $("#reportPanel").style.display = "";
+      streamBuf = d.paragraph;
+      if (streamTimer) { clearTimeout(streamTimer); streamTimer = null; }
+      repaint();
+      renderBriefHead(d);
+    }
+    renderFacts(d);
+    fillMapForFinal(d);
+    // Stash everything needed for the auditable-report page.
+    LAST_RESULT = { query: q, finishedAt: new Date().toISOString(),
+      wallSeconds: Number(dt), result: d };
+    LAST_TRACE = TRACE_BUF.slice();
+    LAST_PLAN = LAST_PLAN_OBJ;
+    $("#reportBtn").classList.add("ready");
+  });
+  // Transport errors either recover via EventSource auto-reconnect or the
+  // stream ends with an explicit "done"; nothing useful to render here.
+  es.addEventListener("error", () => {});
+  es.addEventListener("done", () => { es.close(); $("#goBtn").disabled = false; });
+}
+
+// ---------------------------------------------------------------------------
+// wire
+// ---------------------------------------------------------------------------
+
+// Bind form/sample handlers FIRST so a throw in ensureMap() (e.g. a
+// WebGL init failure) can't strand the user with a dead "Ask" button.
+$("#agentForm").addEventListener("submit", (e) => {
+  e.preventDefault();
+  const q = $("#q").value.trim();
+  if (q) ask(q);
+});
+document.querySelectorAll(".sample-btn").forEach(b => {
+  b.addEventListener("click", () => { $("#q").value = b.dataset.q; ask(b.dataset.q); });
+});
+try { ensureMap(); } catch (e) { console.error("ensureMap failed:", e); }
+
+// Backend hardware pill: fetches /api/backend, renders "<hardware> ·
+// <engine>" and a state color (green=primary up, amber=fallback active,
+// red=down). Refreshes every 60s so a flipped droplet shows up without
+// a page reload.
+async function refreshBackendPill() {
+  const pill = document.getElementById("backendPill");
+  const text = document.getElementById("backendPillText");
+  if (!pill || !text) return;
+  try {
+    const r = await fetch("/api/backend", { cache: "no-store" });
+    if (!r.ok) throw new Error("status " + r.status);
+    const info = await r.json();
+    const onFallback = info.reachable === false && !!info.fallback_engine;
+    const engine = onFallback ? info.fallback_engine : info.engine;
+    const hw = onFallback ? "fallback" : info.hardware;
+    text.textContent = `${hw} · Granite 4.1 / ${engine}`;
+    pill.dataset.state =
+      info.reachable ? "ok" :
+      onFallback ? "fallback" : "down";
+    const detail = info.vllm_base_url
+      ?
+        `Primary: ${info.engine} @ ${info.vllm_base_url}`
+      : `Engine: ${info.engine}`;
+    pill.title = info.reachable
+      ? `${detail} — reachable. No vendor LLM is contacted.`
+      : onFallback
+        ? `${detail} unreachable; running on ${info.fallback_engine} fallback.`
+        : `${detail} — UNREACHABLE.`;
+  } catch (e) {
+    text.textContent = "backend unknown";
+    pill.dataset.state = "down";
+    pill.title = "Could not query /api/backend: " + e.message;
+  }
+}
+refreshBackendPill();
+setInterval(refreshBackendPill, 60000);
+
+// Subscribe to the shared highlight signal so vanilla-rendered citation
+// chips in the briefing prose mirror the highlight state driven by the
+// Lit <r-sources-footer> (and vice versa).
+(async () => {
+  const { highlightedDocId } = await import("/static/components/signals.js");
+  const apply = () => {
+    const id = highlightedDocId.get();
+    document.querySelectorAll("#paragraph .cite").forEach(c => {
+      c.classList.toggle("hl", c.dataset.srcId === id);
+    });
+  };
+  // Some signal builds expose subscribe/effect; use it when present.
+  if (typeof highlightedDocId.subscribe === "function") {
+    highlightedDocId.subscribe(apply);
+  } else {
+    // Fallback: wrap set() so every write re-applies the highlight.
+    // Cheap; signal updates are rare.
+    const orig = highlightedDocId.set.bind(highlightedDocId);
+    highlightedDocId.set = (v) => { orig(v); apply(); };
+  }
+})();
+
+// "Generate auditable report" — snapshots the live map, packs the full
+// evidence (query / plan / per-specialist trace / final result / per-source
+// vintages / labels / urls), parks it in sessionStorage, opens /report.
+$("#reportBtn").addEventListener("click", () => {
+  if (!LAST_RESULT) return;
+  let mapPng = null;
+  try {
+    if (map && map.loaded()) {
+      // The map was created with preserveDrawingBuffer: true, so the
+      // canvas can be read back; repaint first so the snapshot reflects
+      // the latest frame.
+      map.triggerRepaint();
+      mapPng = map.getCanvas().toDataURL("image/png");
+    }
+  } catch (e) {
+    console.warn("map snapshot failed", e);
+  }
+  const pkg = {
+    ...LAST_RESULT,
+    plan: LAST_PLAN,
+    trace: LAST_TRACE,
+    mapPng,
+    sourceLabels: SOURCE_LABELS,
+    sourceUrls: SOURCE_URLS,
+    sourceVintages: SOURCE_VINTAGES,
+    stepLabels: STEP_LABELS,
+  };
+  try {
+    sessionStorage.setItem("riprap_report", JSON.stringify(pkg));
+    window.open("/report", "_blank");
+  } catch (e) {
+    alert("Could not stash report payload (storage may be full): " + e.message);
+  }
+});
diff --git a/web/static/app.js b/web/static/app.js
new file mode 100644
index 0000000000000000000000000000000000000000..e7b2de1b41be6e1dbb51046ad300ead641fdeac0
--- /dev/null
+++ b/web/static/app.js
@@ -0,0 +1,929 @@
+// Riprap web client — subscribes to SSE, lights up FSM steps, renders the report.
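+//
+// For orientation, a minimal consumer of the stream looks like this; the
+// payload fields mirror what markStep() below reads, and the event names
+// and URL are illustrative:
+//
+//   const es = new EventSource("/api/agent/stream?q=" + encodeURIComponent(q));
+//   es.addEventListener("step", (e) => {
+//     const ev = JSON.parse(e.data);   // { step, ok, elapsed_s, result | err }
+//     markStep(ev.step, ev);
+//   });
+//   es.addEventListener("done", () => es.close());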
+ +const STEP_LABELS = { + geocode: ["Geocode (DCP Geosearch)", "address → lat/lon, BBL"], + sandy_inundation: ["Sandy Inundation (NYC OD)", "empirical 2012 extent"], + dep_stormwater: ["DEP Stormwater Maps", "pluvial scenarios + 2080 SLR"], + floodnet: ["FloodNet sensor network", "live ultrasonic depth sensors"], + nyc311: ["NYC 311 archive", "flood complaints in buffer"], + noaa_tides: ["NOAA Tides & Currents (live)", "Battery / Kings Pt / Sandy Hook water level"], + nws_alerts: ["NWS Public Alerts (live)", "active flood-relevant alerts at point"], + nws_obs: ["NWS METAR observation (live)", "nearest ASOS recent precipitation"], + ttm_forecast: ["Granite TTM r2 (TimeSeries)", "9.6h surge-residual nowcast at the Battery"], + microtopo_lidar: ["LiDAR terrain (DEM + TWI + HAND)", "USGS 3DEP DEM + whitebox-workflows hydrology"], + ida_hwm_2021: ["Ida 2021 high-water marks", "USGS empirical post-event extent"], + prithvi_eo_v2: ["Prithvi-EO 2.0 (300M, NASA/IBM)", "Sen1Floods11 satellite water segmentation"], + rag_granite_embedding: ["Granite Embedding 278M (RAG)", "policy corpus retrieval"], + reconcile_granite41: ["Granite 4.1 reconcile (local)", "document-grounded synthesis"], +}; + +const STEPS_ORDER = [ + "geocode", "sandy_inundation", "dep_stormwater", "floodnet", "nyc311", + "noaa_tides", "nws_alerts", "nws_obs", "ttm_forecast", + "microtopo_lidar", "ida_hwm_2021", "prithvi_eo_v2", + "rag_granite_embedding", "reconcile_granite41", +]; + +const $ = (s) => document.querySelector(s); + +let evtSrc = null; +let map = null; +let mapInit = false; + +const MAP_STYLE = { + version: 8, + sources: { + carto: { + type: "raster", + tiles: ["https://a.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png"], + tileSize: 256, + attribution: "© OpenStreetMap contributors © CARTO", + }, + }, + layers: [ + { id: "bg", type: "background", paint: { "background-color": "#fafbfd" } }, + { id: "carto", type: "raster", source: "carto" }, + ], +}; + +function ensureMap() { + if (mapInit) return; + mapInit = true; + map = new maplibregl.Map({ + container: "map", + style: MAP_STYLE, + center: [-74.0, 40.72], + zoom: 10, + attributionControl: { compact: true }, + }); + map.addControl(new maplibregl.NavigationControl({ visualizePitch: false }), "top-right"); + + map.on("load", async () => { + // Sandy + DEP layers — empty until first query (we clip per-address) + map.addSource("sandy", { type: "geojson", data: { type: "FeatureCollection", features: [] } }); + map.addLayer({ + id: "sandy-fill", type: "fill", source: "sandy", + paint: { "fill-color": "#fc5d52", "fill-opacity": 0.28 }, + }); + map.addLayer({ + id: "sandy-line", type: "line", source: "sandy", + paint: { "line-color": "#fc5d52", "line-width": 0.6, "line-opacity": 0.6 }, + }); + + map.addSource("dep", { type: "geojson", data: { type: "FeatureCollection", features: [] } }); + map.addLayer({ + id: "dep-fill", type: "fill", source: "dep", + paint: { + "fill-color": [ + "match", ["get", "Flooding_Category"], + 1, "#568adf", 2, "#1642DF", 3, "#031553", "#568adf", + ], + "fill-opacity": 0.32, + }, + }); + + // Prithvi-EO 2.0 satellite water polygons. Visually distinct from the + // modeled DEP/Sandy layers — teal outline + low fill says "what the + // satellite saw" not "what FEMA/DEP modeled". 
+ map.addSource("prithvi", { type: "geojson", data: { type: "FeatureCollection", features: [] } }); + map.addLayer({ + id: "prithvi-fill", type: "fill", source: "prithvi", + paint: { "fill-color": "#0d9488", "fill-opacity": 0.18 }, + }); + map.addLayer({ + id: "prithvi-line", type: "line", source: "prithvi", + paint: { "line-color": "#0d9488", "line-width": 1.2, "line-opacity": 0.85 }, + }); + + // empty floodnet + addr sources, populated per query + map.addSource("floodnet", { type: "geojson", data: { type: "FeatureCollection", features: [] } }); + map.addLayer({ + id: "floodnet-circles", type: "circle", source: "floodnet", + paint: { + "circle-radius": 6, + "circle-color": ["case", [">", ["get", "n_events_3y"], 0], "#fc5d52", "#1a8754"], + "circle-stroke-color": "#ffffff", + "circle-stroke-width": 1.8, + }, + }); + map.on("click", "floodnet-circles", (e) => { + const f = e.features[0]; + const p = f.properties; + new maplibregl.Popup() + .setLngLat(f.geometry.coordinates) + .setHTML(`${p.name}
    ${p.street}
    events 3y: ${p.n_events_3y}
    peak: ${p.peak_depth_mm} mm`) + .addTo(map); + }); + + map.addSource("addr", { type: "geojson", data: { type: "FeatureCollection", features: [] } }); + map.addLayer({ + id: "addr-marker", type: "circle", source: "addr", + paint: { + "circle-radius": 9, + "circle-color": "#1642DF", + "circle-stroke-color": "#ffffff", + "circle-stroke-width": 2.5, + }, + }); + }); +} + +async function updateMapForResult(geo) { + ensureMap(); + if (!map.loaded()) { + await new Promise(res => map.once("load", res)); + } + // address marker + map.getSource("addr").setData({ + type: "FeatureCollection", + features: [{ + type: "Feature", + geometry: { type: "Point", coordinates: [geo.lon, geo.lat] }, + properties: { address: geo.address }, + }], + }); + // load all per-address layers in parallel + const url = (p) => `${p}?lat=${geo.lat}&lon=${geo.lon}&r=1500`; + const [sandy, dep, prithvi, fn] = await Promise.all([ + fetch(url("/api/layers/sandy")).then(r => r.json()).catch(() => null), + fetch(url("/api/layers/dep_extreme_2080")).then(r => r.json()).catch(() => null), + fetch(url("/api/layers/prithvi_water")).then(r => r.json()).catch(() => null), + fetch(`/api/floodnet_near?lat=${geo.lat}&lon=${geo.lon}&r=1000`).then(r => r.json()).catch(() => null), + ]); + if (sandy) map.getSource("sandy").setData(sandy); + if (dep) map.getSource("dep").setData(dep); + if (prithvi) map.getSource("prithvi").setData(prithvi); + if (fn) map.getSource("floodnet").setData(fn); + + // Hide the Prithvi legend item when no polygons render here. The + // model only marks satellite-observed water bodies — for landlocked + // addresses there's nothing to draw, and an empty legend entry would + // confuse rather than inform. + const prithviLegend = document.querySelector(".legend .sw.prithvi"); + if (prithviLegend) { + const hasPrithvi = prithvi && (prithvi.features || []).length > 0; + prithviLegend.parentElement.style.display = hasPrithvi ? "" : "none"; + } + + map.flyTo({ center: [geo.lon, geo.lat], zoom: 14, speed: 1.2 }); +} + +function resetUI(query) { + $("#trace").classList.remove("hidden"); + $("#report").classList.add("hidden"); + $("#meta").classList.add("hidden"); + $("#paragraph").innerHTML = ""; + const kf = $("#keyFindings"); if (kf) kf.innerHTML = ""; + const ec = $("#evidenceCards"); if (ec) ec.innerHTML = ""; + const pl = $("#policyList"); if (pl) pl.innerHTML = ""; + const ps = $("#policySection"); if (ps) ps.classList.add("hidden"); + const s = $("#sources"); if (s) s.innerHTML = ""; + $("#addr").innerHTML = ""; + CITE_INDEX = {}; + + const ul = $("#steps"); + ul.innerHTML = ""; + for (const sid of STEPS_ORDER) { + const [lbl, hint] = STEP_LABELS[sid] || [sid, ""]; + const li = document.createElement("li"); + li.id = "step-" + sid; + li.className = "pending"; + li.innerHTML = ` + +
    +
    ${lbl}
    +
    ${hint}
    +
    + `; + ul.appendChild(li); + } + // mark first one running + $("#step-" + STEPS_ORDER[0]).classList.replace("pending", "running"); +} + +function markStep(stepId, ev) { + const li = document.getElementById("step-" + stepId); + if (!li) return; + li.className = ev.ok ? "ok" : "err"; + li.querySelector(".icon").textContent = ev.ok ? "✓" : "✗"; + if (ev.elapsed_s != null) { + li.querySelector(".time").textContent = ev.elapsed_s.toFixed(2) + "s"; + } + if (ev.result) { + let div = li.querySelector(".result"); + if (!div) { + div = document.createElement("div"); + div.className = "result"; + li.appendChild(div); + } + div.textContent = formatResult(ev.result); + } else if (ev.err) { + let div = li.querySelector(".result"); + if (!div) { + div = document.createElement("div"); + div.className = "result"; + li.appendChild(div); + } + div.textContent = "error: " + ev.err; + } + + // mark next pending step running + const idx = STEPS_ORDER.indexOf(stepId); + if (idx >= 0 && idx + 1 < STEPS_ORDER.length) { + const next = document.getElementById("step-" + STEPS_ORDER[idx + 1]); + if (next && next.classList.contains("pending")) { + next.classList.replace("pending", "running"); + } + } +} + +function formatResult(r) { + if (typeof r !== "object") return String(r); + return Object.entries(r) + .map(([k, v]) => `${k}: ${typeof v === "object" ? JSON.stringify(v) : v}`) + .join(" · "); +} + +// Map doc_id -> footnote number for the current report; built fresh each query +let CITE_INDEX = {}; + +function rewriteCitations(text) { + // Replace [doc_id] with N using the + // CITE_INDEX. doc_ids not in the index get their first appearance assigned. + return text.replace(/\[([a-z0-9_]+)\]/gi, (_, d) => { + const norm = d.toLowerCase(); + if (CITE_INDEX[norm] == null) { + CITE_INDEX[norm] = Object.keys(CITE_INDEX).length + 1; + } + const n = CITE_INDEX[norm]; + return `${n}`; + }); +} + +function escapeHtml(s) { + return s.replace(/&/g, "&").replace(//g, ">"); +} + +function renderMarkdown(text) { + // Tiny safe markdown subset: + // **Header.** (on its own line) ->
<h3>Header</h3>
+  // **inline bold** (mid-sentence) -> <strong>...</strong>
+  // We escape HTML first to defang any injection in model output.
+  const lines = text.split("\n");
+  const out = [];
+  let bodyBuf = [];
+  const flushBody = () => {
+    if (!bodyBuf.length) return;
+    const body = bodyBuf.join(" ").trim();
+    bodyBuf = [];
+    if (!body) return;
+    const safe = escapeHtml(body)
+      .replace(/\*\*([^*]+)\*\*/g, "<strong>$1</strong>");
+    out.push(`
<p>${safe}</p>
    `); + }; + const headerRe = /^\s*\*\*([A-Z][A-Za-z\s/]+)\.\*\*\s*$/; + for (const line of lines) { + const m = line.match(headerRe); + if (m) { + flushBody(); + out.push(`
<h3>${escapeHtml(m[1])}</h3>
    `); + } else { + bodyBuf.push(line); + } + } + flushBody(); + return out.join(""); +} + +function renderParagraph(text) { + // Build markdown structure FIRST, then rewrite citations inside. Citations + // are bracketed tokens like [sandy] which don't conflict with our markdown. + $("#paragraph").innerHTML = rewriteCitations(renderMarkdown(text)); +} + +const SOURCE_LABELS = { + geocode: "NYC DCP Geosearch", + sandy: "NYC OpenData 5xsi-dfpx — Sandy 2012 inundation", + dep_extreme_2080: "NYC DEP Stormwater — Extreme 3.66 in/hr + 2080 SLR", + dep_moderate_2050: "NYC DEP Stormwater — Moderate 2.13 in/hr + 2050 SLR", + dep_moderate_current: "NYC DEP Stormwater — Moderate 2.13 in/hr current", + floodnet: "FloodNet NYC — live ultrasonic sensor network", + nyc311: "NYC 311 (Socrata erm2-nwe9) — flood descriptors", + microtopo: "USGS 3DEP 30 m DEM via py3dep", + ida_hwm: "USGS STN — Hurricane Ida 2021 HWMs (Event 312, NY)", + prithvi_water: "Prithvi-EO 2.0 (300M, NASA/IBM) — Hurricane Ida 2021 pre/post HLS diff (Aug 25 vs Sep 2)", + rag_dep_2013: "NYC DEP Wastewater Resiliency Plan (2013)", + rag_nycha: "NYCHA — Flood Resilience: Lessons Learned", + rag_coned: "Con Edison Climate Change Resilience Plan (Case 22-E-0222)", + rag_mta: "MTA Climate Resilience Roadmap (Oct 2025)", + rag_comptroller: "NYC Comptroller — \"Is NYC Ready for Rain?\" (2024)", + noaa_tides: "NOAA CO-OPS Tides & Currents — live water level (6-min)", + nws_alerts: "NWS Public Alerts API — active flood-relevant alerts", + nws_obs: "NWS Station Observations — nearest ASOS hourly METAR", + ttm_forecast: "Granite TimeSeries TTM r2 — surge-residual nowcast (Ekambaram et al. 2024, NeurIPS)", +}; + +// ---------------------------------------------------------------------- +// CIVIC ASSESSMENT REPORT — header strip, tier badge, key findings, +// evidence cards, policy quotes, methodology footer. +// ---------------------------------------------------------------------- + +// Tier meta — uses the new composite breakpoints, mirrors app/score.py. +// Tooltip copy explicitly states scope: exposure, not damage probability. +function tierMeta(tier) { + if (tier === 1) return {tier: 1, label: "High exposure", + help: "Multiple sub-indices saturated; empirical and/or modeled scenarios both indicate substantial exposure. Not a damage probability."}; + if (tier === 2) return {tier: 2, label: "Elevated exposure", + help: "At least one sub-index near saturation; significant overlap with empirical or modeled scenarios. Not a damage probability."}; + if (tier === 3) return {tier: 3, label: "Moderate exposure", + help: "Partial signals across categories; scenario- or neighborhood-specific exposure. Not a damage probability."}; + if (tier === 4) return {tier: 4, label: "Limited exposure", + help: "A single contextual signal; no positive scenario hits."}; + return {tier: 0, label: "No flagged exposure", + help: "No positive flood signal across the assessed sources."}; +} + +// ---- Score computation: mirrors app/score.py.composite() exactly --------- +// Three thematic sub-indices, equal weights within each, max-empirical +// floor. Live signals (NWS alerts, surge, precip) are NOT in this score +// per IPCC AR6 WG II's distinction between exposure (static) and event +// occurrence (live). 
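+//
+// Worked example (illustrative numbers, not a real address): a point inside
+// the Sandy extent with one Ida HWM 60 m away and nothing else scores
+// reg = 0, hyd = 0, and emp = (1.0 sandy + 1.0 hwm<100m + 0.5 hwm<800m)
+// / 4.75 ≈ 0.53, so composite ≈ 0.53 lands in tier 3 by the breakpoints
+// below — and the max-empirical floor (Sandy or HWM-within-100m) then
+// lifts it to tier 2.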
+const REG_W = { + fema_1pct: 1.0, fema_02pct: 0.5, + dep_moderate_2050: 0.75, dep_extreme_2080: 0.50, dep_tidal_2050: 0.75, +}; +const HYD_W = { + hand_band: 1.0, twi_quartile: 0.5, + elev_pct_200m_inv: 0.5, elev_pct_750m_inv: 0.5, basin_relief_band: 0.25, +}; +const EMP_W = { + sandy: 1.0, + ida_hwm_within_100m: 1.0, ida_hwm_within_800m: 0.5, + prithvi_polygon: 0.75, complaints_band: 0.75, floodnet_trigger: 0.75, +}; + +const handBand = (h) => h == null ? 0 : (h < 1 ? 1 : h < 3 ? 0.66 : h < 10 ? 0.33 : 0); +const pctInvBand = (p) => p == null ? 0 : (p < 10 ? 1 : p < 25 ? 0.66 : p < 50 ? 0.33 : 0); +const twiBand = (t) => t == null ? 0 : (t >= 12 ? 1 : t >= 10 ? 0.66 : t >= 8 ? 0.33 : 0); +const reliefBand = (r) => r == null ? 0 : (r >= 8 ? 1 : r >= 4 ? 0.66 : r >= 2 ? 0.33 : 0); +const complBand = (n) => !n ? 0 : (n >= 10 ? 1 : n >= 3 ? 0.66 : 0.33); +const sumW = (w) => Object.values(w).reduce((a, b) => a + b, 0); + +function computeComposite(ev) { + const dep = ev.dep || {}; + const mt = ev.microtopo || {}; + const ida = ev.ida_hwm || {}; + const pw = ev.prithvi_water || {}; + + // Build the signal dict in the shape app/score.py expects. + const s = { + // Regulatory + fema_1pct: false, // not yet wired in this build + fema_02pct: false, + dep_moderate_2050: (dep.dep_moderate_2050?.depth_class || 0) > 0, + dep_extreme_2080: (dep.dep_extreme_2080?.depth_class || 0) > 0, + dep_tidal_2050: false, // tidal scenario not in current FSM + // Hydrological + hand_m: mt.hand_m, + twi: mt.twi, + rel_elev_pct_200m: mt.rel_elev_pct_200m, + rel_elev_pct_750m: mt.rel_elev_pct_750m, + basin_relief_m: mt.basin_relief_m, + // Empirical + sandy: !!ev.sandy, + ida_hwm_within_100m: (ida.nearest_dist_m != null && ida.nearest_dist_m < 100) || + (ida.n_within_radius || 0) > 0 && (ida.nearest_dist_m || 9999) < 100, + ida_hwm_within_800m: (ida.n_within_radius || 0) > 0, + prithvi_polygon: !!pw.inside_water_polygon, + complaints_count: ev.nyc311?.n || 0, + floodnet_trigger: (ev.floodnet?.n_flood_events_3y || 0) > 0, + }; + + // Regulatory sub-index (binary signals) + let regRaw = 0; + for (const [k, w] of Object.entries(REG_W)) regRaw += s[k] ? w : 0; + const reg = regRaw / sumW(REG_W); + + // Hydrological sub-index (banded continuous) + const hydBands = { + hand_band: handBand(s.hand_m), + twi_quartile: twiBand(s.twi), + elev_pct_200m_inv: pctInvBand(s.rel_elev_pct_200m), + elev_pct_750m_inv: pctInvBand(s.rel_elev_pct_750m), + basin_relief_band: reliefBand(s.basin_relief_m), + }; + let hydRaw = 0; + for (const [k, w] of Object.entries(HYD_W)) hydRaw += w * hydBands[k]; + const hyd = hydRaw / sumW(HYD_W); + + // Empirical sub-index + const empVals = { + sandy: s.sandy ? 1 : 0, + ida_hwm_within_100m: s.ida_hwm_within_100m ? 1 : 0, + ida_hwm_within_800m: s.ida_hwm_within_800m ? 1 : 0, + prithvi_polygon: s.prithvi_polygon ? 1 : 0, + complaints_band: complBand(s.complaints_count), + floodnet_trigger: s.floodnet_trigger ? 
1 : 0, + }; + let empRaw = 0; + for (const [k, w] of Object.entries(EMP_W)) empRaw += w * empVals[k]; + const emp = empRaw / sumW(EMP_W); + + const composite = reg + hyd + emp; + + // Tier breakpoints (mirror score.py) + let tier = 0; + if (composite >= 1.50) tier = 1; + else if (composite >= 1.00) tier = 2; + else if (composite >= 0.50) tier = 3; + else if (composite >= 0.01) tier = 4; + + // Max-empirical floor: Sandy or HWM-within-100m → tier ≤ 2 + const floorApplied = !!(s.sandy || s.ida_hwm_within_100m); + if (floorApplied && (tier === 0 || tier > 2)) tier = 2; + + return { + subindices: {regulatory: reg, hydrological: hyd, empirical: emp}, + composite, tier, floorApplied, + }; +} + +// Backward-compat shim: places that called computeScore() now read .tier. +function computeScore(ev) { return computeComposite(ev).tier; } + +function renderHeader(ev) { + const geo = ev.geocode || {}; + $("#reportAddr").textContent = geo.address || "(unresolved)"; + $("#reportBoro").textContent = geo.borough || "—"; + $("#reportBbl").textContent = geo.bbl || "—"; + $("#reportTs").textContent = new Date().toISOString().slice(0,10); +} + +function renderTier(ev) { + const c = computeComposite(ev); + const m = tierMeta(c.tier); + const badge = $("#tierBadge"); + badge.className = "tier-badge t-" + m.tier; + $("#tierNum").textContent = m.tier; + const floor = c.floorApplied ? " · empirical floor" : ""; + $("#tierLabel").textContent = `Tier ${m.tier} — ${m.label}${floor}`; + $("#tierHelp").textContent = m.help; +} + +function renderKeyFindings(ev) { + const dl = $("#keyFindings"); + dl.innerHTML = ""; + const rows = []; + + rows.push(["Sandy 2012 zone", + ev.sandy ? "INSIDE" : "outside", + ev.sandy ? "hit" : "miss"]); + + const dep = ev.dep || {}; + const dHit = Object.entries(dep).find(([_, v]) => (v.depth_class || 0) > 0); + if (dHit) { + const [scen, v] = dHit; + const lbl = scen.replace("dep_", "").replace(/_/g, " ").toUpperCase(); + rows.push(["DEP scenario", `${lbl} — ${v.depth_label}`, "hit"]); + } else { + rows.push(["DEP scenarios", "outside all 3", "miss"]); + } + + const mt = ev.microtopo; + if (mt) { + rows.push(["Elevation", + `${mt.point_elev_m} m above sea level`, ""]); + if (mt.hand_m != null) { + rows.push(["Height Above Drainage", `${mt.hand_m} m (HAND)`, ""]); + } + if (mt.twi != null) { + rows.push(["Topographic Wetness Index", + `${mt.twi} (${mt.twi >= 14 ? "very high" : mt.twi >= 10 ? "high" : mt.twi >= 6 ? "moderate" : "low"})`, ""]); + } + } + + const fn = ev.floodnet; + if (fn && fn.n_sensors > 0) { + rows.push(["FloodNet (3 yr)", + `${fn.n_flood_events_3y} events across ${fn.n_sensors} sensors`, + fn.n_flood_events_3y > 0 ? "hit" : ""]); + } + + const ida = ev.ida_hwm; + if (ida && ida.n_within_radius > 0) { + const ht = ida.max_height_above_gnd_ft != null + ? `, max ${ida.max_height_above_gnd_ft} ft above ground` : ""; + rows.push(["Hurricane Ida 2021 HWMs", + `${ida.n_within_radius} within ${ida.radius_m} m${ht}`, "hit"]); + } + + const pw = ev.prithvi_water; + if (pw && pw.nearest_distance_m != null) { + rows.push(["Prithvi-EO Ida 2021", + pw.inside_water_polygon + ? "INSIDE inundation polygon" + : `${pw.nearest_distance_m} m to nearest inundation polygon`, + pw.inside_water_polygon ? "hit" : ""]); + } + + const c311 = ev.nyc311; + if (c311 && c311.n > 0) { + rows.push(["311 flood complaints", + `${c311.n} within ${c311.radius_m} m, last ${c311.years} yr`, + c311.n >= 5 ? "hit" : ""]); + } + + dl.innerHTML = rows.map(([k, v, cls]) => + `
<dt>${k}</dt><dd class="${cls}">${v}</dd>`
+  ).join("");
+}
+
+function evCard({key, title, flag, rows, sourceText, sourceUrl, vintage, collapsed}) {
+  // flag: "hit" | "note" | "miss"
+  const inner = rows.map(([k, v]) =>
+    `
<div class="k">${k}</div>
+     <div class="v">${v}</div>`).join("");
+  const foot = sourceUrl
+    ? `<a href="${sourceUrl}" target="_blank" rel="noopener">${sourceText}</a>${vintage ? " · " + vintage : ""}`
+    : `${sourceText}${vintage ? " · " + vintage : ""}`;
+  const cls = "ec" + (collapsed ? " collapsed" : "");
+  // Card shell: flag chip, title, detail rows, source footnote.
+  return `
+    <div class="${cls}" data-key="${key}">
+      <div class="ec-head">
+        <span class="flag ${flag}"></span>
+        <span class="ec-title">${title}</span>
+      </div>
+      <div class="ec-rows">${inner}</div>
+      <div class="ec-foot">${foot}</div>
+    </div>
    `; +} + +function renderEvidence(ev) { + const cards = []; + + if (ev.sandy != null) { + cards.push(evCard({ + key: "sandy", title: "Sandy 2012 inundation", + flag: ev.sandy ? "hit" : "miss", + rows: [ + ["Inside extent", ev.sandy ? "yes" : "no"], + ["Reference event", "Hurricane Sandy, 29-30 Oct 2012"], + ], + sourceText: "NYC OpenData 5xsi-dfpx", + sourceUrl: "https://data.cityofnewyork.us/Environment/Sandy-Inundation-Zone/uyj8-7rv5", + vintage: "empirical 2012 extent", + collapsed: !ev.sandy, + })); + } + + const dep = ev.dep || {}; + const depRows = []; + for (const [k, v] of Object.entries(dep)) { + const label = k.replace("dep_", "").replace(/_/g, " "); + depRows.push([label, + v.depth_class > 0 ? `${v.depth_label}` : "outside"]); + } + if (depRows.length) { + const anyHit = Object.values(dep).some(v => (v.depth_class || 0) > 0); + cards.push(evCard({ + key: "dep", title: "DEP Stormwater scenarios", + flag: anyHit ? "hit" : "miss", + rows: depRows, + sourceText: "NYC DEP via NYC OpenData 9i7c-xyvv", + sourceUrl: "https://data.cityofnewyork.us/Environment/NYC-Stormwater-Flood-Maps/9i7c-xyvv", + vintage: "modeled, 2021 release", + collapsed: !anyHit, + })); + } + + const fn = ev.floodnet; + if (fn && fn.n_sensors > 0) { + const peak = fn.peak_event; + const rows = [ + ["Sensors within 600 m", String(fn.n_sensors)], + ["Flood events, last 3 yr", String(fn.n_flood_events_3y)], + ]; + if (peak && peak.max_depth_mm) { + rows.push(["Peak event", `${peak.max_depth_mm} mm depth at ${peak.deployment_id}`]); + rows.push(["Peak date", (peak.start_time || "").slice(0, 10)]); + } + cards.push(evCard({ + key: "floodnet", title: "FloodNet sensor network", + flag: fn.n_flood_events_3y > 0 ? "hit" : "note", + rows, + sourceText: "FloodNet NYC (NYU/CUNY/MOCEJ)", + sourceUrl: "https://www.floodnet.nyc/", + vintage: "live, queried per request", + collapsed: false, + })); + } + + const ida = ev.ida_hwm; + if (ida && ida.n_within_radius > 0) { + const rows = [ + ["HWMs within 800 m", String(ida.n_within_radius)], + ]; + if (ida.max_height_above_gnd_ft != null) + rows.push(["Max above-ground height", `${ida.max_height_above_gnd_ft} ft`]); + if (ida.max_elev_ft != null) + rows.push(["Max HWM elevation", `${ida.max_elev_ft} ft`]); + if (ida.nearest_dist_m != null) + rows.push(["Nearest HWM site", `${ida.nearest_site || "—"} (${ida.nearest_dist_m} m)`]); + cards.push(evCard({ + key: "ida_hwm", title: "Hurricane Ida 2021 high-water marks", + flag: "hit", rows, + sourceText: "USGS Short-Term Network, Event 312 (NY)", + sourceUrl: "https://stn.wim.usgs.gov/", + vintage: "post-event survey, Sep 2021", + collapsed: false, + })); + } + + const mt = ev.microtopo; + if (mt) { + const rows = [ + ["Elevation", `${mt.point_elev_m} m`], + ["Lower than (200 m)", `${mt.rel_elev_pct_200m}% of cells`], + ["Lower than (750 m)", `${mt.rel_elev_pct_750m}% of cells`], + ["Basin relief (750 m)", `${mt.basin_relief_m} m`], + ]; + if (mt.hand_m != null) rows.push(["HAND", `${mt.hand_m} m`]); + if (mt.twi != null) rows.push(["TWI", String(mt.twi)]); + cards.push(evCard({ + key: "microtopo", title: "LiDAR-derived terrain (DEM + TWI + HAND)", + flag: "note", rows, + sourceText: "USGS 3DEP DEM via py3dep · whitebox-workflows hydrology", + sourceUrl: "https://www.usgs.gov/3d-elevation-program", + vintage: "DEM 30 m, hydro-conditioned", + collapsed: false, + })); + } + + const pw = ev.prithvi_water; + if (pw && pw.nearest_distance_m != null) { + const rows = [ + ["Inside Ida-attributable polygon", pw.inside_water_polygon ? 
"yes" : "no"], + ["Nearest inundation polygon", `${pw.nearest_distance_m} m`], + ["Inundation polygons within 500 m", String(pw.n_polygons_within_500m)], + ["Pre-event scene", "HLS T18TWK 2021-08-25 (3% cloud)"], + ["Post-event scene", "HLS T18TWK 2021-09-02 (1% cloud, ~12 h after Ida peak)"], + ]; + cards.push(evCard({ + key: "prithvi_water", + title: "Prithvi-EO 2.0 — Hurricane Ida flood inundation", + flag: pw.inside_water_polygon ? "hit" : "note", rows, + sourceText: "NASA / IBM Prithvi-EO-2.0-300M-TL-Sen1Floods11 (Apache-2.0, 300M params, run via TerraTorch on HLS Sentinel-2)", + sourceUrl: "https://huggingface.co/ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL-Sen1Floods11", + vintage: "Polygons = post-event water minus pre-event water. Sub-surface flooding (subway / basement) not visible to optical satellites.", + collapsed: false, + })); + } + + const c311 = ev.nyc311; + if (c311 && c311.n > 0) { + const rows = [ + ["Total complaints", String(c311.n)], + ["Buffer", `${c311.radius_m} m`], + ["Window", `${c311.years} years`], + ]; + if (c311.by_descriptor) { + const top = Object.entries(c311.by_descriptor).slice(0, 3) + .map(([k, v]) => `${v}× ${k.replace(/\s*\(.+?\)\s*$/, "").replace(/\s*\(SA\d?\)?$/, "")}`) + .join("; "); + if (top) rows.push(["Top descriptors", top]); + } + if (c311.by_year) { + const yrs = Object.entries(c311.by_year).map(([y, n]) => `${y}: ${n}`).join(", "); + rows.push(["By year", yrs]); + } + cards.push(evCard({ + key: "nyc311", title: "NYC 311 flood complaints", + flag: c311.n >= 5 ? "hit" : "note", rows, + sourceText: "NYC 311 (Socrata erm2-nwe9)", + sourceUrl: "https://data.cityofnewyork.us/Social-Services/311-Service-Requests-from-2010-to-Present/erm2-nwe9", + vintage: "live, last 5 years", + collapsed: false, + })); + } + + // Live signals — refresh every query, may produce nothing on a calm day. + const tides = ev.noaa_tides; + if (tides && tides.observed_ft_mllw != null) { + const rows = [ + ["Gauge", `${tides.station_name} (${tides.station_id})`], + ["Distance to gauge", `${tides.distance_km} km`], + ["Observed", `${tides.observed_ft_mllw} ft above MLLW`], + ]; + if (tides.predicted_ft_mllw != null) + rows.push(["Predicted (astro tide)", `${tides.predicted_ft_mllw} ft`]); + if (tides.residual_ft != null) + rows.push(["Residual (obs − pred)", `${tides.residual_ft >= 0 ? "+" : ""}${tides.residual_ft} ft`]); + if (tides.obs_time) + rows.push(["Observation time", tides.obs_time]); + const flag = (tides.residual_ft != null && tides.residual_ft >= 1.0) ? 
"hit" : "note"; + cards.push(evCard({ + key: "noaa_tides", + title: "NOAA Tides & Currents — live coastal water level", + flag, rows, + sourceText: "NOAA CO-OPS API (api.tidesandcurrents.noaa.gov)", + sourceUrl: `https://tidesandcurrents.noaa.gov/stationhome.html?id=${tides.station_id}`, + vintage: "live, 6-min cadence; residual ≈ surge", + collapsed: false, + })); + } + + const al = ev.nws_alerts; + if (al && al.n_active > 0) { + const rows = [["Active flood-relevant alerts", String(al.n_active)]]; + (al.alerts || []).slice(0, 3).forEach((a, i) => { + rows.push([ + `Alert ${i + 1}`, + `${a.event} (${a.severity || "?"} / ${a.urgency || "?"}) — expires ${ + (a.expires || "").slice(0, 16) + }`, + ]); + }); + cards.push(evCard({ + key: "nws_alerts", + title: "NWS — active flood alerts at this point", + flag: "hit", rows, + sourceText: "NWS Public Alerts API (api.weather.gov)", + sourceUrl: "https://www.weather.gov/documentation/services-web-api", + vintage: "live, push-cadence (refresh on event)", + collapsed: false, + })); + } + + const obs = ev.nws_obs; + if (obs && obs.station_id && !obs.error && ( + obs.precip_last_hour_mm != null || + obs.precip_last_6h_mm != null)) { + const rows = [ + ["Nearest ASOS station", `${obs.station_name} (${obs.station_id})`], + ["Distance", `${obs.distance_km} km`], + ]; + if (obs.precip_last_hour_mm != null) + rows.push(["Precip last 1 h", `${obs.precip_last_hour_mm} mm`]); + if (obs.precip_last_3h_mm != null) + rows.push(["Precip last 3 h", `${obs.precip_last_3h_mm} mm`]); + if (obs.precip_last_6h_mm != null) + rows.push(["Precip last 6 h", `${obs.precip_last_6h_mm} mm`]); + if (obs.obs_time) + rows.push(["Observation time", obs.obs_time]); + const heavy = (obs.precip_last_hour_mm || 0) >= 10 || + (obs.precip_last_6h_mm || 0) >= 25; + cards.push(evCard({ + key: "nws_obs", + title: "NWS hourly METAR — recent precipitation", + flag: heavy ? "hit" : "note", rows, + sourceText: "NWS station observations API", + sourceUrl: `https://www.weather.gov/wrh/timeseries?site=${obs.station_id}`, + vintage: "live, ~hourly", + collapsed: false, + })); + } + + const ttm = ev.ttm_forecast; + if (ttm && ttm.available) { + const peak = ttm.forecast_peak_ft; + const rows = [ + ["Gauge", `${ttm.station_name} (NOAA ${ttm.station_id})`], + ["Recent residual", `${ttm.history_recent_ft} ft`], + ["Recent peak |residual|", `${ttm.history_peak_abs_ft} ft (last ~51 h)`], + ["Forecast peak residual", `${peak >= 0 ? "+" : ""}${peak} ft`], + ["Forecast peak time", `~${ttm.forecast_peak_minutes_ahead} min ahead (${(ttm.forecast_peak_time_utc || "").slice(11, 16)} UTC)`], + ["Threshold", `±${ttm.threshold_ft} ft (gate for emission)`], + ]; + const flag = ttm.interesting ? (Math.abs(peak) >= 0.5 ? "hit" : "note") : "miss"; + cards.push(evCard({ + key: "ttm_forecast", + title: "Granite TimeSeries TTM r2 — surge nowcast", + flag, rows, + sourceText: "IBM Granite TimeSeries TTM r2 (Ekambaram et al. 2024, NeurIPS)", + sourceUrl: "https://huggingface.co/ibm-granite/granite-timeseries-ttm-r2", + vintage: "zero-shot multivariate forecaster, ~1.5M params; runs on CPU", + collapsed: !ttm.interesting, + })); + } + + $("#evidenceCards").innerHTML = cards.join(""); +} + +function renderPolicy(ev) { + const policy = $("#policySection"); + const rag = ev.rag || []; + if (!rag.length) { policy.classList.add("hidden"); return; } + policy.classList.remove("hidden"); + const items = rag.map(h => `
+    <li>
+      <div class="t">${h.title || h.doc_id}</div>
+      <blockquote>${(h.text || "").replace(/^"|"$/g, "").trim()}</blockquote>
+      <div class="cite">${h.citation || ""}${h.page ? " · p. " + h.page : ""}</div>
+    </li>`);
+  $("#policyList").innerHTML = items.join("");
+}
+
+function renderEnergy(ev) {
+  const en = ev.energy;
+  if (!en) return;
+  const $$ = (id) => document.getElementById(id);
+  $$("energyLocal").textContent = `${en.local_mwh} mWh`;
+  $$("energyCloud").textContent = `~${en.cloud_mwh} mWh`;
+  $$("energyRatio").textContent = en.ratio_cloud_over_local
+    ? `${en.ratio_cloud_over_local}×`
+    : "—";
+  const m = en.method || {};
+  $$("energyMethod").innerHTML =
+    `Local: ${m.local} (q4_K_M, package power; ${m.local_source}). ` +
+    `Cloud: ${m.cloud} (${m.cloud_source}).`;
+}
+
+function renderNumberedSources() {
+  // Render the methodology footer's <ol>
      in CITE_INDEX order so the [n] + // superscripts in the lede paragraph match. CITE_INDEX is populated + // by rewriteCitations() during renderParagraph(). + const ol = $("#sources"); + if (!ol) return; + const entries = Object.entries(CITE_INDEX).sort((a, b) => a[1] - b[1]); + ol.innerHTML = entries.map(([doc_id, n]) => + `
    1. ${SOURCE_LABELS[doc_id] || doc_id} [${doc_id}]
    2. ` + ).join(""); +} + +function renderAddress(g) { + const dl = $("#addr"); + dl.innerHTML = ""; + const rows = [ + ["address", g.address], + ["borough", g.borough || ""], + ["lat / lon", `${g.lat.toFixed(5)}, ${g.lon.toFixed(5)}`], + ["BBL", g.bbl || ""], + ["BIN", g.bin || ""], + ]; + for (const [k, v] of rows) { + if (!v) continue; + const dt = document.createElement("dt"); dt.textContent = k; + const dd = document.createElement("dd"); dd.textContent = v; + dl.appendChild(dt); dl.appendChild(dd); + } +} + +// Suggested-address chips fill the input and submit +document.querySelectorAll(".chip[data-q]").forEach((btn) => { + btn.addEventListener("click", (e) => { + e.preventDefault(); + $("#q").value = btn.getAttribute("data-q"); + $("#qform").requestSubmit(); + }); +}); + +$("#qform").addEventListener("submit", (e) => { + e.preventDefault(); + const q = $("#q").value.trim(); + if (!q) return; + if (evtSrc) evtSrc.close(); + resetUI(q); + $("#go").disabled = true; + evtSrc = new EventSource("/api/stream?q=" + encodeURIComponent(q)); + + evtSrc.addEventListener("step", (msg) => { + const ev = JSON.parse(msg.data); + markStep(ev.step, ev); + }); + evtSrc.addEventListener("final", (msg) => { + const ev = JSON.parse(msg.data); + $("#report").classList.remove("hidden"); + $("#meta").classList.remove("hidden"); + $("#map-card").classList.remove("hidden"); + // Reset citation index for this query before any citation rewriting + CITE_INDEX = {}; + if (ev.geocode) { + renderAddress(ev.geocode); + updateMapForResult(ev.geocode); + } + renderHeader(ev); + renderTier(ev); + if (ev.paragraph) renderParagraph(ev.paragraph); + renderKeyFindings(ev); + renderEvidence(ev); + renderPolicy(ev); + renderEnergy(ev); + renderNumberedSources(); + }); + evtSrc.addEventListener("done", () => { + $("#go").disabled = false; + evtSrc.close(); + }); + evtSrc.addEventListener("error", (msg) => { + console.error("SSE error", msg); + $("#go").disabled = false; + evtSrc.close(); + }); +}); diff --git a/web/static/compare.html b/web/static/compare.html new file mode 100644 index 0000000000000000000000000000000000000000..640f4122ac0dadae73ea1b5f8bd35d6b6ebdfeca --- /dev/null +++ b/web/static/compare.html @@ -0,0 +1,131 @@ + + + + + + Riprap — compare two NYC addresses + + + + + + +
      +
      +
      + Riprap + · + compare two NYC addresses, side by side +
      +
      + address + register + + local · 2× Granite 4.1 + +
      +
      +
      + +
      +
      +
      + + +
      +
      + + +
      + +
      +
      + try: + + + +
      +
      + +
      + + + + + +
      +
      +

      MapBoth addresses · shared layers

      +
      +
      + Sandy 2012 + DEP Extreme 2080 + FloodNet (no events) + FloodNet w/ events + A & B markers +
      +
      +
      + + + + +
      + +
      +
      +
      +

      Compare mode

      +

      + Two addresses, two parallel runs of the same 8-specialist FSM. + Each side renders its own trace, at-a-glance signals, cited + summary, and source list. The map shows both markers and shared + layer overlays. Both Granite 4.1 reconciliations run concurrently + via Ollama (OLLAMA_NUM_PARALLEL=2); wallclock is + comparable to a single query. +
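+        <!-- Illustrative aside (assumed setup, not part of the page): the
+             concurrency above relies on Ollama's standard env knob, e.g.
+               OLLAMA_NUM_PARALLEL=2 ollama serve
+             so the two Granite 4.1 reconcile requests decode in parallel
+             instead of queuing behind each other. -->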

      +
      +
      +

      Sources

      +

      + NYC Open Data · + FloodNet NYC · + NYC DCP Geosearch · + USGS STN · + USGS 3DEP +

      +
      +
      +
      + + + + diff --git a/web/static/compare.js b/web/static/compare.js new file mode 100644 index 0000000000000000000000000000000000000000..2e21b0a9dabb7a445e5bf6e0a6dca7a709319f15 --- /dev/null +++ b/web/static/compare.js @@ -0,0 +1,323 @@ +// Riprap — Compare mode. Two addresses, parallel FSM runs, shared map. + +const STEP_LABELS = { + geocode: ["Geocode (DCP Geosearch)", "address → lat/lon, BBL"], + sandy_inundation: ["Sandy Inundation (NYC OD)", "empirical 2012 extent"], + dep_stormwater: ["DEP Stormwater Maps", "pluvial scenarios + 2080 SLR"], + floodnet: ["FloodNet sensor network", "live ultrasonic depth sensors"], + nyc311: ["NYC 311 archive", "flood complaints in buffer"], + microtopo_lidar: ["LiDAR terrain (DEM + TWI + HAND)", "USGS 3DEP DEM + whitebox hydrology"], + ida_hwm_2021: ["Ida 2021 high-water marks", "USGS empirical post-event extent"], + prithvi_eo_v2: ["Prithvi-EO 2.0 (300M, NASA/IBM)", "Sen1Floods11 satellite water segmentation"], + rag_granite_embedding: ["Granite Embedding 278M (RAG)", "policy corpus retrieval"], + reconcile_granite41: ["Granite 4.1 reconcile (local)", "document-grounded synthesis"], +}; + +const STEPS_ORDER = [ + "geocode", "sandy_inundation", "dep_stormwater", "floodnet", "nyc311", + "microtopo_lidar", "ida_hwm_2021", "prithvi_eo_v2", + "rag_granite_embedding", "reconcile_granite41", +]; + +const SOURCE_LABELS = { + geocode: "NYC DCP Geosearch", + sandy: "NYC OpenData 5xsi-dfpx — Sandy 2012 inundation", + dep_extreme_2080: "NYC DEP Stormwater — Extreme 3.66 in/hr + 2080 SLR", + dep_moderate_2050: "NYC DEP Stormwater — Moderate 2.13 in/hr + 2050 SLR", + dep_moderate_current: "NYC DEP Stormwater — Moderate 2.13 in/hr current", + floodnet: "FloodNet NYC — live ultrasonic sensor network", + nyc311: "NYC 311 (Socrata erm2-nwe9) — flood descriptors", + microtopo: "USGS 3DEP 30 m DEM via py3dep", + ida_hwm: "USGS STN — Hurricane Ida 2021 HWMs (Event 312, NY)", + prithvi_water: "Prithvi-EO 2.0 (300M, NASA/IBM) Sen1Floods11 — satellite water segmentation", + rag_dep_2013: "NYC DEP Wastewater Resiliency Plan (2013)", + rag_nycha: "NYCHA — Flood Resilience: Lessons Learned", + rag_coned: "Con Edison Climate Change Resilience Plan (Case 22-E-0222)", + rag_mta: "MTA Climate Resilience Roadmap (Oct 2025)", + rag_comptroller: "NYC Comptroller — \"Is NYC Ready for Rain?\" (2024)", +}; + +const $ = (s) => document.querySelector(s); + +let evtSrc = null; +let map = null; +let mapInit = false; + +const MAP_STYLE = { + version: 8, + sources: { + carto: { + type: "raster", + tiles: ["https://a.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png"], + tileSize: 256, + attribution: "© OpenStreetMap contributors © CARTO", + }, + }, + layers: [ + { id: "bg", type: "background", paint: { "background-color": "#fafbfd" } }, + { id: "carto", type: "raster", source: "carto" }, + ], +}; + +function ensureMap() { + if (mapInit) return; + mapInit = true; + map = new maplibregl.Map({ + container: "map", + style: MAP_STYLE, + center: [-74.0, 40.72], + zoom: 10, + attributionControl: { compact: true }, + }); + map.addControl(new maplibregl.NavigationControl({ visualizePitch: false }), "top-right"); + map.on("load", () => { + for (const sideKey of ["a", "b"]) { + map.addSource("sandy_" + sideKey, { type: "geojson", data: { type: "FeatureCollection", features: [] } }); + map.addLayer({ id: "sandy_" + sideKey + "-fill", type: "fill", source: "sandy_" + sideKey, + paint: { "fill-color": "#fc5d52", "fill-opacity": 0.22 } }); + map.addSource("dep_" + sideKey, { type: "geojson", data: { 
type: "FeatureCollection", features: [] } }); + map.addLayer({ id: "dep_" + sideKey + "-fill", type: "fill", source: "dep_" + sideKey, + paint: { + "fill-color": ["match", ["get", "Flooding_Category"], + 1, "#568adf", 2, "#1642DF", 3, "#031553", "#568adf"], + "fill-opacity": 0.28 } }); + map.addSource("fn_" + sideKey, { type: "geojson", data: { type: "FeatureCollection", features: [] } }); + map.addLayer({ id: "fn_" + sideKey + "-circles", type: "circle", source: "fn_" + sideKey, + paint: { + "circle-radius": 5, + "circle-color": ["case", [">", ["get", "n_events_3y"], 0], "#fc5d52", "#1a8754"], + "circle-stroke-color": "#ffffff", + "circle-stroke-width": 1.5, + } }); + } + map.addSource("addr_a", { type: "geojson", data: { type: "FeatureCollection", features: [] } }); + map.addLayer({ id: "addr_a-marker", type: "circle", source: "addr_a", + paint: { "circle-radius": 9, "circle-color": "#1642DF", "circle-stroke-color": "#fff", "circle-stroke-width": 2.5 } }); + map.addSource("addr_b", { type: "geojson", data: { type: "FeatureCollection", features: [] } }); + map.addLayer({ id: "addr_b-marker", type: "circle", source: "addr_b", + paint: { "circle-radius": 9, "circle-color": "#9333ea", "circle-stroke-color": "#fff", "circle-stroke-width": 2.5 } }); + }); +} + +function resetSide(side) { + const ul = document.getElementById("steps" + side.toUpperCase()); + ul.innerHTML = ""; + for (const sid of STEPS_ORDER) { + const [lbl, hint] = STEP_LABELS[sid] || [sid, ""]; + const li = document.createElement("li"); + li.id = `step-${side}-${sid}`; + li.className = "pending"; + li.innerHTML = ` + +
+      <span class="icon">○</span>
+      <span class="lbl">${lbl}
+        <span class="hint">${hint}</span>
+      </span>
+      <span class="time"></span>
      + `; + ul.appendChild(li); + } + document.getElementById("step-" + side + "-" + STEPS_ORDER[0]).classList.replace("pending", "running"); + + document.getElementById("report" + side.toUpperCase()).classList.add("hidden"); + document.getElementById("paragraph" + side.toUpperCase()).innerHTML = ""; + document.getElementById("glance" + side.toUpperCase()).innerHTML = ""; + document.getElementById("sources" + side.toUpperCase()).innerHTML = ""; +} + +function markStep(side, stepId, ev) { + const li = document.getElementById(`step-${side}-${stepId}`); + if (!li) return; + li.className = ev.ok ? "ok" : "err"; + li.querySelector(".icon").textContent = ev.ok ? "✓" : "✗"; + if (ev.elapsed_s != null) { + li.querySelector(".time").textContent = ev.elapsed_s.toFixed(2) + "s"; + } + if (ev.result) { + let div = li.querySelector(".result"); + if (!div) { + div = document.createElement("div"); div.className = "result"; + li.appendChild(div); + } + div.textContent = formatResult(ev.result); + } else if (ev.err) { + let div = li.querySelector(".result"); + if (!div) { + div = document.createElement("div"); div.className = "result"; + li.appendChild(div); + } + div.textContent = "error: " + ev.err; + } + const idx = STEPS_ORDER.indexOf(stepId); + if (idx >= 0 && idx + 1 < STEPS_ORDER.length) { + const next = document.getElementById(`step-${side}-${STEPS_ORDER[idx + 1]}`); + if (next && next.classList.contains("pending")) next.classList.replace("pending", "running"); + } +} + +function formatResult(r) { + if (typeof r !== "object") return String(r); + return Object.entries(r) + .map(([k, v]) => `${k}: ${typeof v === "object" ? JSON.stringify(v) : v}`) + .join(" · "); +} + +function renderParagraph(side, text) { + const html = (text || "").replace(/\[([a-z0-9_]+)\]/gi, (_, d) => + `[${d}]`); + document.getElementById("paragraph" + side.toUpperCase()).innerHTML = html; +} + +function renderGlance(side, ev) { + const ul = document.getElementById("glance" + side.toUpperCase()); + if (!ul) return; + const rows = []; + if (ev.sandy) { + rows.push({c: "hit", mark: "■", html: "Inside Sandy 2012 inundation extent"}); + } else { + rows.push({c: "miss", mark: "□", html: "Outside Sandy 2012 inundation extent"}); + } + const dep = ev.dep || {}; + const depHits = Object.entries(dep).filter(([_, v]) => (v.depth_class || 0) > 0); + if (depHits.length) { + for (const [scen, v] of depHits) { + const lbl = scen.replace("dep_", "").replace(/_/g, " "); + rows.push({c: "hit", mark: "■", html: `Inside DEP ${lbl} — ${v.depth_label}`}); + } + } else { + rows.push({c: "miss", mark: "□", html: "Outside all DEP stormwater scenarios"}); + } + const fn = ev.floodnet; + if (fn && fn.n_sensors) { + if (fn.n_flood_events_3y > 0) { + const peak = fn.peak_event; + const peakStr = peak && peak.max_depth_mm + ? `, peak ${peak.max_depth_mm} mm` : ''; + rows.push({c: "hit", mark: "■", + html: `${fn.n_flood_events_3y} FloodNet events (3 yr)${peakStr}`}); + } else { + rows.push({c: "miss", mark: "□", + html: `${fn.n_sensors} FloodNet sensor(s), no events`}); + } + } + const ida = ev.ida_hwm; + if (ida && ida.n_within_radius > 0) { + const ht = ida.max_height_above_gnd_ft != null + ? `up to ${ida.max_height_above_gnd_ft} ft above ground` : ''; + rows.push({c: "hit", mark: "■", + html: `${ida.n_within_radius} Ida 2021 HWMs ≤${ida.radius_m} m${ht ? 
', ' + ht : ''}`}); + } + const mt = ev.microtopo; + if (mt) { + rows.push({c: "note", mark: "◆", + html: `Elevation ${mt.point_elev_m} m, lower than ${mt.rel_elev_pct_200m}% of nearby (200 m)`}); + } + const c311 = ev.nyc311; + if (c311 && c311.n > 0) { + rows.push({c: "note", mark: "◆", + html: `${c311.n} 311 flood complaints ≤${c311.radius_m} m, ${c311.years} yr`}); + } + ul.innerHTML = rows + .map(r => `
<li class="${r.c}"><span class="mark">${r.mark}</span>${r.html}</li>`).join("");
+}
+
+function renderSources(side, ev, paraText) {
+  const fired = new Set([...(paraText || "").matchAll(/\[([a-z0-9_]+)\]/g)].map(m => m[1]));
+  const order = [
+    "sandy", "dep_extreme_2080", "dep_moderate_2050", "dep_moderate_current",
+    "floodnet", "ida_hwm", "microtopo", "nyc311",
+    "rag_dep_2013", "rag_nycha", "rag_coned", "rag_mta", "rag_comptroller",
+  ];
+  const present = new Set();
+  if (ev.sandy) present.add("sandy");
+  for (const [k, v] of Object.entries(ev.dep || {})) {
+    if ((v.depth_class || 0) > 0) present.add(k);
+  }
+  if (ev.floodnet && ev.floodnet.n_sensors > 0) present.add("floodnet");
+  if (ev.ida_hwm && ev.ida_hwm.n_within_radius > 0) present.add("ida_hwm");
+  if (ev.microtopo) present.add("microtopo");
+  if (ev.nyc311 && ev.nyc311.n > 0) present.add("nyc311");
+  if (ev.rag) for (const h of ev.rag) present.add(h.doc_id);
+
+  const ol = document.getElementById("sources" + side.toUpperCase());
+  ol.innerHTML = order.filter(d => present.has(d)).map(d => {
+    const label = SOURCE_LABELS[d] || d;
+    const dim = fired.has(d) ? "" : ' style="opacity:0.5"';
+    return `<li${dim}><code>${d}</code> ${label}</li>`;
+  }).join("");
+}
+
+async function updateMapForSide(side, geo) {
+  ensureMap();
+  if (!map.loaded()) await new Promise(res => map.once("load", res));
+  const sideKey = side.toLowerCase();
+  map.getSource("addr_" + sideKey).setData({
+    type: "FeatureCollection",
+    features: [{ type: "Feature", geometry: { type: "Point", coordinates: [geo.lon, geo.lat] }, properties: {} }],
+  });
+  const url = (p) => `${p}?lat=${geo.lat}&lon=${geo.lon}&r=1500`;
+  const [sandy, dep, fn] = await Promise.all([
+    fetch(url("/api/layers/sandy")).then(r => r.json()).catch(() => null),
+    fetch(url("/api/layers/dep_extreme_2080")).then(r => r.json()).catch(() => null),
+    fetch(`/api/floodnet_near?lat=${geo.lat}&lon=${geo.lon}&r=1000`).then(r => r.json()).catch(() => null),
+  ]);
+  if (sandy) map.getSource("sandy_" + sideKey).setData(sandy);
+  if (dep) map.getSource("dep_" + sideKey).setData(dep);
+  if (fn) map.getSource("fn_" + sideKey).setData(fn);
+}
+
+function fitBoth(ga, gb) {
+  if (!ga || !gb || !map.loaded()) return;
+  const bounds = new maplibregl.LngLatBounds()
+    .extend([ga.lon, ga.lat]).extend([gb.lon, gb.lat]);
+  map.fitBounds(bounds, { padding: 80, duration: 800, maxZoom: 13 });
+}
+
+let geoA = null, geoB = null;
+
+document.querySelectorAll(".chip[data-a]").forEach((btn) => {
+  btn.addEventListener("click", (e) => {
+    e.preventDefault();
+    document.getElementById("qa").value = btn.getAttribute("data-a");
+    document.getElementById("qb").value = btn.getAttribute("data-b");
+    document.getElementById("cform").requestSubmit();
+  });
+});
+
+document.getElementById("cform").addEventListener("submit", (e) => {
+  e.preventDefault();
+  const a = document.getElementById("qa").value.trim();
+  const b = document.getElementById("qb").value.trim();
+  if (!a || !b) return;
+  document.getElementById("aTitle").textContent = a;
+  document.getElementById("bTitle").textContent = b;
+  resetSide("a"); resetSide("b");
+  ensureMap();
+  geoA = geoB = null;
+  document.getElementById("cgo").disabled = true;
+  if (evtSrc) evtSrc.close();
+  evtSrc = new EventSource(`/api/compare?a=${encodeURIComponent(a)}&b=${encodeURIComponent(b)}`);
+
+  evtSrc.addEventListener("step", (msg) => {
+    const ev = JSON.parse(msg.data);
+    markStep(ev.side, ev.step, ev);
+  });
+  evtSrc.addEventListener("final", (msg) => {
+    const ev = JSON.parse(msg.data);
+    const side = ev.side;
+    document.getElementById("report" + side.toUpperCase()).classList.remove("hidden");
+    if (ev.geocode) {
+      if (side === "a") geoA = ev.geocode; else geoB = ev.geocode;
+      updateMapForSide(side, ev.geocode).then(() => fitBoth(geoA, geoB));
+    }
+    if (ev.paragraph) renderParagraph(side, ev.paragraph);
+    renderGlance(side, ev);
+    renderSources(side, ev, ev.paragraph || "");
+  });
+  evtSrc.addEventListener("done", () => {
+    document.getElementById("cgo").disabled = false;
+    evtSrc.close();
+  });
+  evtSrc.addEventListener("error", () => {
+    document.getElementById("cgo").disabled = false;
+  });
+});
diff --git a/web/static/components/briefing.js b/web/static/components/briefing.js
new file mode 100644
index 0000000000000000000000000000000000000000..2e87bf5cdd10a680946808985349be683d19a8a5
--- /dev/null
+++ b/web/static/components/briefing.js
@@ -0,0 +1,133 @@
+// <r-briefing> — the streaming-token, citation-chipped briefing panel.
+//
+// Replaces the agent.js renderMarkdown + rewriteCitations + paint
+// scheduler. Token streaming becomes "append to a signal, re-render."
+//
+// Properties:
+//   text         — full markdown text (set by parent on token / final events)
+//   streaming    — bool; shows the blinking caret
+//   citeIndex    — { doc_id: number } shared with <r-sources-footer>
+//   sourceLabels — passed through for chip tooltips
+//
+// Signals consumed:
+//   highlightedDocId — toggles `.hl` on chips reactively (set by
+//                      <r-sources-footer> on hover)
+// Signals updated:
+//   citeIndex        — populated as citations are encountered in the text
+//   highlightedDocId — set on chip hover/click
+
+import { html, LitElement } from "https://esm.sh/lit@3";
+import { unsafeHTML } from "https://esm.sh/lit@3/directives/unsafe-html.js";
+import { SignalWatcher } from "https://esm.sh/@lit-labs/signals@0.1.x";
+import { citeIndex, highlightedDocId } from "./signals.js";
+
+// Same minimal markdown subset as agent.js renderMarkdown — kept
+// duplicated for now; will collapse when agent.js stops calling
+// renderMarkdown. After full port this is the only impl.
+function renderMarkdownPure(text) {
+  const lines = text.split("\n");
+  const out = [];
+  let para = []; let bullets = [];
+  const escapeHtml = (s) =>
+    String(s ?? "").replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;");
+  const flushPara = () => {
+    if (!para.length) return;
+    const safe = escapeHtml(para.join(" ").trim())
+      .replace(/\*\*([^*]+)\*\*/g, "<strong>$1</strong>");
+    if (safe) out.push(`
<p>${safe}</p>
+    `);
+    para = [];
+  };
+  const flushBullets = () => {
+    if (!bullets.length) return;
+    const items = bullets.map(b => {
+      const safe = escapeHtml(b.trim()).replace(/\*\*([^*]+)\*\*/g, "<strong>$1</strong>");
+      return `
<li>${safe}</li>`;
+    }).join("");
+    out.push(`
<ul>${items}</ul>
      `); + bullets = []; + }; + // Granite sometimes runs all bullets onto one line. + const expanded = []; + for (const line of lines) { + if (line.trim().startsWith("- ") && line.includes(" - ", 2)) { + const parts = line.split(/(?:^|(?<=\.\s))\s*-\s+/g).filter(p => p.trim()); + for (const p of parts) expanded.push("- " + p.trim()); + } else { expanded.push(line); } + } + for (const line of expanded) { + const m = line.match(/^\s*\*\*([A-Z][A-Za-z\s/]+)\.\*\*\s*$/); + if (m) { flushPara(); flushBullets(); out.push(`
<h3>${escapeHtml(m[1])}</h3>
+      `); }
+    else if (/^\s*[-*]\s+/.test(line)) { flushPara(); bullets.push(line.replace(/^\s*[-*]\s+/, "")); }
+    else { flushBullets(); para.push(line); }
+  }
+  flushPara(); flushBullets();
+  return out.join("");
+}
+
+function rewriteCitations(html, sourceLabels, indexMap) {
+  return html.replace(/\[([a-z0-9_]+)\]/gi, (_, id) => {
+    const norm = id.toLowerCase();
+    if (indexMap[norm] == null) indexMap[norm] = Object.keys(indexMap).length + 1;
+    const n = indexMap[norm];
+    const lab = sourceLabels[norm] || norm;
+    return `<sup class="cite" data-src-id="${norm}" title="${lab}">${n}</sup>`;
+  });
+}
+
+export class Briefing extends SignalWatcher(LitElement) {
+  static properties = {
+    text: { type: String },
+    streaming: { type: Boolean, reflect: true },
+    sourceLabels: { type: Object },
+  };
+
+  // No shadow DOM — we use the parent's `.report-pane #paragraph` styles
+  // directly so the markdown renders match the legacy/print idiom.
+  createRenderRoot() { return this; }
+
+  constructor() {
+    super();
+    this.text = "";
+    this.streaming = false;
+    this.sourceLabels = {};
+  }
+
+  updated(changed) {
+    if (changed.has("text") && this.text) {
+      // Bind chip hover/click to the highlight signal post-render.
+      this._bindChips();
+    }
+  }
+
+  _bindChips() {
+    this.querySelectorAll(".cite").forEach(c => {
+      const id = c.dataset.srcId;
+      if (!id || c.dataset.signalBound) return;
+      c.dataset.signalBound = "1";
+      c.addEventListener("mouseenter", () => highlightedDocId.set(id));
+      c.addEventListener("click", (e) => {
+        e.stopPropagation();
+        const cur = highlightedDocId.get();
+        highlightedDocId.set(cur === id ? null : id);
+      });
+    });
+    // Apply highlight class reactively from current signal value.
+    const hl = highlightedDocId.get();
+    this.querySelectorAll(".cite").forEach(c => {
+      c.classList.toggle("hl", c.dataset.srcId === hl);
+    });
+  }
+
+  render() {
+    if (!this.text) return html`
<p>Waiting for content…</p>
      `; + const indexMap = {}; + const md = renderMarkdownPure(this.text); + const withCites = rewriteCitations(md, this.sourceLabels, indexMap); + // Push the citation index up to the shared signal so SourcesFooter + // re-renders. Done in render() because indexMap is computed here. + queueMicrotask(() => citeIndex.set({ ...indexMap })); + return html`${unsafeHTML(withCites)}`; + } +} + +customElements.define("r-briefing", Briefing); diff --git a/web/static/components/signals.js b/web/static/components/signals.js new file mode 100644 index 0000000000000000000000000000000000000000..f9059d50f9235ab43fcf7b9a45697977bccdedce --- /dev/null +++ b/web/static/components/signals.js @@ -0,0 +1,21 @@ +// Shared reactive state for Riprap web components. +// +// Lit components import these signals; updating one signal re-renders +// every subscribed component. Replaces the hand-wired DOM-querying +// cross-linking we used to do in vanilla JS. + +import { signal } from "https://esm.sh/@lit-labs/signals@0.1.x"; + +// Currently-highlighted citation doc_id. When a Briefing chip is hovered +// or clicked, this gets set; SourcesFooter observes it and highlights +// the matching row, and vice versa. +export const highlightedDocId = signal(null); + +// The full agent run output (from /api/agent/stream `final` event). +// Components that need the result post-render read from this. +export const lastResult = signal(null); + +// The cite-index map { doc_id: number } populated by Briefing as it +// renders the streamed markdown. SourcesFooter reads it to know which +// numbered rows to render. +export const citeIndex = signal({}); diff --git a/web/static/components/sources-footer.js b/web/static/components/sources-footer.js new file mode 100644 index 0000000000000000000000000000000000000000..345ad06c0c5a56e107d224a6c97ecfc30c986ea8 --- /dev/null +++ b/web/static/components/sources-footer.js @@ -0,0 +1,144 @@ +// — numbered, hyperlinked, vintage-aware Sources +// section that appears below the Briefing in the agent UI and inside +// the printable /report. +// +// Shared signals power the cross-linking with : hovering +// a [N] chip in the prose highlights the matching
    7. here, and +// clicking either side persists the highlight + scrolls into view. +// +// Mounts via . Reads: +// - citeIndex — { doc_id: number } from Briefing +// - highlightedDocId — current highlight target (in/out) +// Plus three label/url/vintage maps passed in as properties. + +import { html, css, LitElement } from "https://esm.sh/lit@3"; +import { SignalWatcher } from "https://esm.sh/@lit-labs/signals@0.1.x"; +import { citeIndex, highlightedDocId } from "./signals.js"; + +export class SourcesFooter extends SignalWatcher(LitElement) { + static properties = { + labels: { type: Object }, + urls: { type: Object }, + vintages: { type: Object }, + }; + + static styles = css` + :host { + display: block; + border-top: 1px solid var(--line, #e5e7eb); + background: var(--bg-soft, #f5f7fb); + padding: 12px 16px 14px; + } + :host([hidden]) { display: none; } + .src-h { + font-size: 10px; font-weight: 700; + text-transform: uppercase; letter-spacing: 0.10em; + color: var(--text-muted, #6b7280); + margin: 0 0 8px; + } + ol { + margin: 0; padding: 0; list-style: none; + display: grid; gap: 6px; + font-size: 11.5px; line-height: 1.45; + } + li { + display: grid; grid-template-columns: 22px 1fr; + gap: 8px; align-items: baseline; + padding: 4px 6px; border-radius: 3px; + cursor: pointer; + transition: background 0.15s; + } + li:hover, li.hl { + background: rgba(22, 66, 223, 0.10); + } + .src-num { + font-family: var(--mono, monospace); font-size: 10.5px; + font-weight: 700; color: var(--nyc-blue, #1642DF); + text-align: right; + } + .src-link { + color: var(--text, #111); text-decoration: none; + border-bottom: 1px dotted var(--text-muted, #6b7280); + transition: color 0.12s, border-color 0.12s; + } + .src-link:hover { + color: var(--nyc-blue, #1642DF); + border-bottom-color: var(--nyc-blue, #1642DF); + } + .src-ext { + font-size: 9.5px; color: var(--text-faint, #9ca3af); + margin-left: 2px; vertical-align: super; + } + .src-vintage { + display: block; color: var(--text-muted, #6b7280); + font-size: 9.5px; margin-top: 2px; + } + .src-id { + display: inline-block; + font-family: var(--mono, monospace); font-size: 9.5px; + color: var(--text-faint, #9ca3af); margin-left: 6px; + } + `; + + constructor() { + super(); + this.labels = {}; + this.urls = {}; + this.vintages = {}; + } + + _entries() { + return Object.entries(citeIndex.get() || {}).sort((a, b) => a[1] - b[1]); + } + + _onHover(id) { + highlightedDocId.set(id); + } + + _onLeave() { + // Only clear if not pinned by click — keep highlight on click. + // For now, hover-only highlight clears on leave. + } + + _onClick(id) { + const cur = highlightedDocId.get(); + highlightedDocId.set(cur === id ? null : id); + } + + render() { + const entries = this._entries(); + if (!entries.length) { + this.setAttribute("hidden", ""); + return html``; + } + this.removeAttribute("hidden"); + const hl = highlightedDocId.get(); + return html` +
+      <div class="src-h">Sources</div>
+      <ol>
+        ${entries.map(([id, n]) => {
+          const url = this.urls[id];
+          const label = this.labels[id] || id;
+          const vintage = this.vintages[id];
+          const cls = id === hl ? "hl" : "";
+          return html`
+            <li class=${cls}
+                @mouseenter=${() => this._onHover(id)}
+                @click=${() => this._onClick(id)}>
+              <span class="src-num">[${n}]</span>
+              <span>
+                ${url
+                  ? html`<a class="src-link" href=${url} target="_blank"
+                            @click=${(e) => e.stopPropagation()}>${label}<span class="src-ext">↗</span></a>`
+                  : html`${label}`}
+                <span class="src-id">${id}</span>
+                ${vintage ? html`<span class="src-vintage">${vintage}</span>` : ""}
+              </span>
+            </li>
+          `;
+        })}
+      </ol>
+    `;
+  }
+}
+
+customElements.define("r-sources-footer", SourcesFooter);
diff --git a/web/static/components/trace.js b/web/static/components/trace.js
new file mode 100644
index 0000000000000000000000000000000000000000..6143a33a1f1f93b98cb9b16d36dec9be7709f7b0
--- /dev/null
+++ b/web/static/components/trace.js
@@ -0,0 +1,87 @@
+// <r-trace> — specialist trail. Reactive list of pipeline steps.
+//
+// API:
+//   .pushStep(step)     — append a {step, ok, elapsed_s, result, err} record
+//   .clear()            — reset
+//   .meta = "1.4s"      — text shown in the header
+//   .stepLabels = {...} — { stepName: [label, hint] } map (set once at boot)
+//
+// Light DOM (no shadow) so the existing `#steps li.ok / .err / .running`
+// CSS in agent.html keeps applying without rewrites.
+
+import { html, css, LitElement } from "https://esm.sh/lit@3";
+
+const escapeHtml = (s) =>
+  String(s ?? "").replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;");
+
+export class Trace extends LitElement {
+  static properties = {
+    steps: { type: Array, state: true },
+    meta: { type: String, reflect: true },
+    stepLabels: { type: Object },
+  };
+
+  createRenderRoot() { return this; }
+
+  constructor() {
+    super();
+    this.steps = [];
+    this.meta = "";
+    this.stepLabels = {};
+  }
+
+  pushStep(step) {
+    this.steps = [...this.steps, step];
+  }
+
+  clear() {
+    this.steps = [];
+    this.meta = "";
+  }
+
+  _renderStep(step) {
+    const [label, hint] = this.stepLabels[step.step] || [step.step, ""];
+    const ok = step.ok === true;
+    const fail = step.ok === false;
+    const cls = ok ? "ok" : fail ? "err" : "running";
+    const mark = ok ? "✓" : fail ? "✗" : "○";
+    const time = step.elapsed_s != null
+      ? `<span class="time">${step.elapsed_s}s</span>` : "";
+    const result = step.result
+      ? `<div class="result">${escapeHtml(JSON.stringify(step.result))}</div>` : "";
+    const err = step.err
+      ? `<div class="result">error: ${escapeHtml(step.err)}</div>` : "";
+    // Inner HTML is hand-built so the existing list CSS targets the same
+    // structure as the legacy renderer; we keep .innerHTML rather than
+    // Lit's html`` for byte-for-byte parity here.
+    const li = document.createElement("li");
+    li.className = cls;
+    li.innerHTML = `
+      <span class="icon">${mark}</span>
+      <span class="lbl">${escapeHtml(label)}
+        <span class="hint">${escapeHtml(hint)}</span>
+      </span>
+      ${time}
+      ${result}
+      ${err}
+    `;
+    return li;
+  }
+
+  render() {
+    // Render the <ol>
        as innerHTML on update so we don't fight Lit's + // template diffing for raw HTML lists. + queueMicrotask(() => { + const ol = this.querySelector("ol#steps-list"); + if (!ol) return; + ol.innerHTML = ""; + for (const s of this.steps) ol.appendChild(this._renderStep(s)); + }); + // Inline reset so the legacy `#steps { list-style: none; ... }` rules + // (which now target the host element, not the
<ol>) keep applying.
+    return html`<ol id="steps-list" style="list-style: none; margin: 0; padding: 0;"></ol>
            `; + } +} + +customElements.define("r-trace", Trace); diff --git a/web/static/dist/riprap.js b/web/static/dist/riprap.js new file mode 100644 index 0000000000000000000000000000000000000000..a69d9b1a9a5724094a3c8d5fbe2d80a98fe1dee5 --- /dev/null +++ b/web/static/dist/riprap.js @@ -0,0 +1,3777 @@ +const es = "5"; +typeof window < "u" && ((window.__svelte ??= {}).v ??= /* @__PURE__ */ new Set()).add(es); +const ts = 1, rs = 2, ns = 16, ss = 1, is = 4, ls = 8, os = 16, as = 4, fs = 1, us = 2, Fr = "[", rr = "[!", $r = "[?", nr = "]", Ge = {}, P = Symbol(), zr = "http://www.w3.org/1999/xhtml", cs = "http://www.w3.org/2000/svg", hs = "http://www.w3.org/1998/Math/MathML", ds = !1; +var jr = Array.isArray, vs = Array.prototype.indexOf, rt = Array.prototype.includes, Ct = Array.from, At = Object.keys, gt = Object.defineProperty, qe = Object.getOwnPropertyDescriptor, ps = Object.getOwnPropertyDescriptors, _s = Object.prototype, gs = Array.prototype, Hr = Object.getPrototypeOf, kr = Object.isExtensible; +function bs(e) { + return typeof e == "function"; +} +const re = () => { +}; +function ms(e) { + for (var t = 0; t < e.length; t++) + e[t](); +} +function qr() { + var e, t, r = new Promise((n, s) => { + e = n, t = s; + }); + return { promise: r, resolve: e, reject: t }; +} +function ws(e, t) { + if (Array.isArray(e)) + return e; + if (!(Symbol.iterator in e)) + return Array.from(e); + const r = []; + for (const n of e) + if (r.push(n), r.length === t) break; + return r; +} +const H = 2, nt = 4, Rt = 8, Br = 1 << 24, he = 16, de = 32, Ne = 64, Ht = 128, se = 512, D = 1024, z = 2048, _e = 4096, V = 8192, Z = 16384, Ce = 32768, qt = 1 << 25, Ke = 65536, Bt = 1 << 17, ys = 1 << 18, Xe = 1 << 19, $s = 1 << 20, Ee = 1 << 25, Je = 65536, Nt = 1 << 21, bt = 1 << 22, De = 1 << 23, vt = Symbol("$state"), Ur = Symbol("legacy props"), ye = new class extends Error { + name = "StaleReactionError"; + message = "The reaction that called `getAbortSignal()` was re-run or destroyed"; +}(), ks = ( + // We gotta write it like this because after downleveling the pure comment may end up in the wrong location + !!globalThis.document?.contentType && /* @__PURE__ */ globalThis.document.contentType.includes("xml") +), Lt = 3, at = 8; +function Es(e) { + throw new Error("https://svelte.dev/e/lifecycle_outside_component"); +} +function xs() { + throw new Error("https://svelte.dev/e/async_derived_orphan"); +} +function Ts(e, t, r) { + throw new Error("https://svelte.dev/e/each_key_duplicate"); +} +function Ss(e) { + throw new Error("https://svelte.dev/e/effect_in_teardown"); +} +function As() { + throw new Error("https://svelte.dev/e/effect_in_unowned_derived"); +} +function Ns(e) { + throw new Error("https://svelte.dev/e/effect_orphan"); +} +function Os() { + throw new Error("https://svelte.dev/e/effect_update_depth_exceeded"); +} +function Ms() { + throw new Error("https://svelte.dev/e/hydration_failed"); +} +function Cs(e) { + throw new Error("https://svelte.dev/e/props_invalid_value"); +} +function Rs() { + throw new Error("https://svelte.dev/e/state_descriptors_fixed"); +} +function Ls() { + throw new Error("https://svelte.dev/e/state_prototype_fixed"); +} +function Is() { + throw new Error("https://svelte.dev/e/state_unsafe_mutation"); +} +function Ds() { + throw new Error("https://svelte.dev/e/svelte_boundary_reset_onerror"); +} +function Ps() { + console.warn("https://svelte.dev/e/derived_inert"); +} +function wt(e) { + console.warn("https://svelte.dev/e/hydration_mismatch"); +} +function Fs() { + 
console.warn("https://svelte.dev/e/svelte_boundary_reset_noop"); +} +let N = !1; +function xe(e) { + N = e; +} +let x; +function j(e) { + if (e === null) + throw wt(), Ge; + return x = e; +} +function st() { + return j(/* @__PURE__ */ ve(x)); +} +function F(e) { + if (N) { + if (/* @__PURE__ */ ve(x) !== null) + throw wt(), Ge; + x = e; + } +} +function Vr(e = 1) { + if (N) { + for (var t = e, r = x; t--; ) + r = /** @type {TemplateNode} */ + /* @__PURE__ */ ve(r); + x = r; + } +} +function Ot(e = !0) { + for (var t = 0, r = x; ; ) { + if (r.nodeType === at) { + var n = ( + /** @type {Comment} */ + r.data + ); + if (n === nr) { + if (t === 0) return r; + t -= 1; + } else (n === Fr || n === rr || // "[1", "[2", etc. for if blocks + n[0] === "[" && !isNaN(Number(n.slice(1)))) && (t += 1); + } + var s = ( + /** @type {TemplateNode} */ + /* @__PURE__ */ ve(r) + ); + e && r.remove(), r = s; + } +} +function Yr(e) { + if (!e || e.nodeType !== at) + throw wt(), Ge; + return ( + /** @type {Comment} */ + e.data + ); +} +function Wr(e) { + return e === this.v; +} +function Gr(e, t) { + return e != e ? t == t : e !== t || e !== null && typeof e == "object" || typeof e == "function"; +} +function Kr(e) { + return !Gr(e, this.v); +} +let zs = !1, G = null; +function it(e) { + G = e; +} +function It(e, t = !1, r) { + G = { + p: G, + i: !1, + c: null, + e: null, + s: e, + x: null, + r: ( + /** @type {Effect} */ + y + ), + l: null + }; +} +function Dt(e) { + var t = ( + /** @type {ComponentContext} */ + G + ), r = t.e; + if (r !== null) { + t.e = null; + for (var n of r) + wn(n); + } + return e !== void 0 && (t.x = e), t.i = !0, G = t.p, e ?? /** @type {T} */ + {}; +} +function Jr() { + return !0; +} +let ze = []; +function Zr() { + var e = ze; + ze = [], ms(e); +} +function Pe(e) { + if (ze.length === 0 && !pt) { + var t = ze; + queueMicrotask(() => { + t === ze && Zr(); + }); + } + ze.push(e); +} +function js() { + for (; ze.length > 0; ) + Zr(); +} +function Xr(e) { + var t = y; + if (t === null) + return A.f |= De, e; + if (!(t.f & Ce) && !(t.f & nt)) + throw e; + Ie(e, t); +} +function Ie(e, t) { + for (; t !== null; ) { + if (t.f & Ht) { + if (!(t.f & Ce)) + throw e; + try { + t.b.error(e); + return; + } catch (r) { + e = r; + } + } + t = t.parent; + } + throw e; +} +const Hs = -7169; +function R(e, t) { + e.f = e.f & Hs | t; +} +function sr(e) { + e.f & se || e.deps === null ? R(e, D) : R(e, _e); +} +function Qr(e) { + if (e !== null) + for (const t of e) + !(t.f & H) || !(t.f & Je) || (t.f ^= Je, Qr( + /** @type {Derived} */ + t.deps + )); +} +function en(e, t, r) { + e.f & z ? t.add(e) : e.f & _e && r.add(e), Qr(e.deps), R(e, D); +} +function tn(e, t, r) { + if (e == null) + return t(void 0), re; + const n = ft( + () => e.subscribe( + t, + // @ts-expect-error + r + ) + ); + return n.unsubscribe ? 
() => n.unsubscribe() : n; +} +const Qe = []; +function rn(e, t = re) { + let r = null; + const n = /* @__PURE__ */ new Set(); + function s(l) { + if (Gr(e, l) && (e = l, r)) { + const f = !Qe.length; + for (const a of n) + a[1](), Qe.push(a, e); + if (f) { + for (let a = 0; a < Qe.length; a += 2) + Qe[a][0](Qe[a + 1]); + Qe.length = 0; + } + } + } + function i(l) { + s(l( + /** @type {T} */ + e + )); + } + function o(l, f = re) { + const a = [l, f]; + return n.add(a), n.size === 1 && (r = t(s, i) || re), l( + /** @type {T} */ + e + ), () => { + n.delete(a), n.size === 0 && r && (r(), r = null); + }; + } + return { set: s, update: i, subscribe: o }; +} +function qs(e) { + let t; + return tn(e, (r) => t = r)(), t; +} +let kt = !1, Ut = Symbol(); +function Vt(e, t, r) { + const n = r[t] ??= { + store: null, + source: /* @__PURE__ */ ar(void 0), + unsubscribe: re + }; + if (n.store !== e && !(Ut in r)) + if (n.unsubscribe(), n.store = e ?? null, e == null) + n.source.v = void 0, n.unsubscribe = re; + else { + var s = !0; + n.unsubscribe = tn(e, (i) => { + s ? n.source.v = i : te(n.source, i); + }), s = !1; + } + return e && Ut in r ? qs(e) : b(n.source); +} +function nn() { + const e = {}; + function t() { + cr(() => { + for (var r in e) + e[r].unsubscribe(); + gt(e, Ut, { + enumerable: !1, + value: !0 + }); + }); + } + return [e, t]; +} +function Bs(e) { + var t = kt; + try { + return kt = !1, [e(), kt]; + } finally { + kt = t; + } +} +const Fe = /* @__PURE__ */ new Set(); +let S = null, ue = null, Yt = null, pt = !1, jt = !1, et = null, xt = null; +var Er = 0; +let Us = 1; +class Oe { + id = Us++; + /** + * The current values of any signals that are updated in this batch. + * Tuple format: [value, is_derived] (note: is_derived is false for deriveds, too, if they were overridden via assignment) + * The keys of this map are identical to `this.#previous` + * @type {Map} + */ + current = /* @__PURE__ */ new Map(); + /** + * The values of any signals (sources and deriveds) that are updated in this batch _before_ those updates took place. + * The keys of this map are identical to `this.#current` + * @type {Map} + */ + previous = /* @__PURE__ */ new Map(); + /** + * When the batch is committed (and the DOM is updated), we need to remove old branches + * and append new ones by calling the functions added inside (if/each/key/etc) blocks + * @type {Set<(batch: Batch) => void>} + */ + #e = /* @__PURE__ */ new Set(); + /** + * If a fork is discarded, we need to destroy any effects that are no longer needed + * @type {Set<(batch: Batch) => void>} + */ + #n = /* @__PURE__ */ new Set(); + /** + * Callbacks that should run only when a fork is committed. + * @type {Set<(batch: Batch) => void>} + */ + #t = /* @__PURE__ */ new Set(); + /** + * Async effects that are currently in flight + * @type {Map} + */ + #i = /* @__PURE__ */ new Map(); + /** + * Async effects that are currently in flight, _not_ inside a pending boundary + * @type {Map} + */ + #s = /* @__PURE__ */ new Map(); + /** + * A deferred that resolves when the batch is committed, used with `settled()` + * TODO replace with Promise.withResolvers once supported widely enough + * @type {{ promise: Promise, resolve: (value?: any) => void, reject: (reason: unknown) => void } | null} + */ + #l = null; + /** + * The root effects that need to be flushed + * @type {Effect[]} + */ + #r = []; + /** + * Effects created while this batch was active.
+ * @type {Effect[]} + */ + #o = []; + /** + * Deferred effects (which run after async work has completed) that are DIRTY + * @type {Set} + */ + #f = /* @__PURE__ */ new Set(); + /** + * Deferred effects that are MAYBE_DIRTY + * @type {Set} + */ + #u = /* @__PURE__ */ new Set(); + /** + * A map of branches that still exist, but will be destroyed when this batch + * is committed — we skip over these during `process`. + * The value contains child effects that were dirty/maybe_dirty before being reset, + * so they can be rescheduled if the branch survives. + * @type {Map} + */ + #a = /* @__PURE__ */ new Map(); + /** + * Inverse of #skipped_branches which we need to tell prior batches to unskip them when committing + * @type {Set} + */ + #h = /* @__PURE__ */ new Set(); + is_fork = !1; + #v = !1; + /** @type {Set} */ + #d = /* @__PURE__ */ new Set(); + #c() { + return this.is_fork || this.#s.size > 0; + } + #b() { + for (const n of this.#d) + for (const s of n.#s.keys()) { + for (var t = !1, r = s; r.parent !== null; ) { + if (this.#a.has(r)) { + t = !0; + break; + } + r = r.parent; + } + if (!t) + return !0; + } + return !1; + } + /** + * Add an effect to the #skipped_branches map and reset its children + * @param {Effect} effect + */ + skip_effect(t) { + this.#a.has(t) || this.#a.set(t, { d: [], m: [] }), this.#h.delete(t); + } + /** + * Remove an effect from the #skipped_branches map and reschedule + * any tracked dirty/maybe_dirty child effects + * @param {Effect} effect + * @param {(e: Effect) => void} callback + */ + unskip_effect(t, r = (n) => this.schedule(n)) { + var n = this.#a.get(t); + if (n) { + this.#a.delete(t); + for (var s of n.d) + R(s, z), r(s); + for (s of n.m) + R(s, _e), r(s); + } + this.#h.add(t); + } + #p() { + if (Er++ > 1e3 && (Fe.delete(this), Vs()), !this.#c()) { + for (const l of this.#f) + this.#u.delete(l), R(l, z), this.schedule(l); + for (const l of this.#u) + R(l, _e), this.schedule(l); + } + const t = this.#r; + this.#r = [], this.apply(); + var r = et = [], n = [], s = xt = []; + for (const l of t) + try { + this.#g(l, r, n); + } catch (f) { + throw on(l), f; + } + if (S = null, s.length > 0) { + var i = Oe.ensure(); + for (const l of s) + i.schedule(l); + } + if (et = null, xt = null, this.#c() || this.#b()) { + this.#_(n), this.#_(r); + for (const [l, f] of this.#a) + ln(l, f); + } else { + this.#i.size === 0 && Fe.delete(this), this.#f.clear(), this.#u.clear(); + for (const l of this.#e) l(this); + this.#e.clear(), xr(n), xr(r), this.#l?.resolve(); + } + var o = ( + /** @type {Batch | null} */ + /** @type {unknown} */ + S + ); + if (this.#r.length > 0) { + const l = o ??= this; + l.#r.push(...this.#r.filter((f) => !l.#r.includes(f))); + } + o !== null && (Fe.add(o), o.#p()); + } + /** + * Traverse the effect tree, executing effects or stashing + * them for later execution as appropriate + * @param {Effect} root + * @param {Effect[]} effects + * @param {Effect[]} render_effects + */ + #g(t, r, n) { + t.f ^= D; + for (var s = t.first; s !== null; ) { + var i = s.f, o = (i & (de | Ne)) !== 0, l = o && (i & D) !== 0, f = l || (i & V) !== 0 || this.#a.has(s); + if (!f && s.fn !== null) { + o ? s.f ^= D : i & nt ? 
r.push(s) : $t(s) && (i & he && this.#u.add(s), ot(s)); + var a = s.first; + if (a !== null) { + s = a; + continue; + } + } + for (; s !== null; ) { + var u = s.next; + if (u !== null) { + s = u; + break; + } + s = s.parent; + } + } + } + /** + * @param {Effect[]} effects + */ + #_(t) { + for (var r = 0; r < t.length; r += 1) + en(t[r], this.#f, this.#u); + } + /** + * Associate a change to a given source with the current + * batch, noting its previous and current values + * @param {Value} source + * @param {any} value + * @param {boolean} [is_derived] + */ + capture(t, r, n = !1) { + t.v !== P && !this.previous.has(t) && this.previous.set(t, t.v), t.f & De || (this.current.set(t, [r, n]), ue?.set(t, r)), this.is_fork || (t.v = r); + } + activate() { + S = this; + } + deactivate() { + S = null, ue = null; + } + flush() { + try { + jt = !0, S = this, this.#p(); + } finally { + Er = 0, Yt = null, et = null, xt = null, jt = !1, S = null, ue = null, Be.clear(); + } + } + discard() { + for (const t of this.#n) t(this); + this.#n.clear(), this.#t.clear(), Fe.delete(this); + } + /** + * @param {Effect} effect + */ + register_created_effect(t) { + this.#o.push(t); + } + #m() { + for (const u of Fe) { + var t = u.id < this.id, r = []; + for (const [c, [d, v]] of this.current) { + if (u.current.has(c)) { + var n = ( + /** @type {[any, boolean]} */ + u.current.get(c)[0] + ); + if (t && d !== n) + u.current.set(c, [d, v]); + else + continue; + } + r.push(c); + } + var s = [...u.current.keys()].filter((c) => !this.current.has(c)); + if (s.length === 0) + t && u.discard(); + else if (r.length > 0) { + if (t) + for (const c of this.#h) + u.unskip_effect(c, (d) => { + d.f & (he | bt) ? u.schedule(d) : u.#_([d]); + }); + u.activate(); + var i = /* @__PURE__ */ new Set(), o = /* @__PURE__ */ new Map(); + for (var l of r) + sn(l, s, i, o); + o = /* @__PURE__ */ new Map(); + var f = [...u.current.keys()].filter( + (c) => this.current.has(c) ? ( + /** @type {[any, boolean]} */ + this.current.get(c)[0] !== c + ) : !0 + ); + for (const c of this.#o) + !(c.f & (Z | V | Bt)) && ir(c, f, o) && (c.f & (bt | he) ? (R(c, z), u.schedule(c)) : u.#f.add(c)); + if (u.#r.length > 0) { + u.apply(); + for (var a of u.#r) + u.#g(a, [], []); + u.#r = []; + } + u.deactivate(); + } + } + for (const u of Fe) + u.#d.has(this) && (u.#d.delete(this), u.#d.size === 0 && !u.#c() && (u.activate(), u.#p())); + } + /** + * @param {boolean} blocking + * @param {Effect} effect + */ + increment(t, r) { + let n = this.#i.get(r) ?? 0; + if (this.#i.set(r, n + 1), t) { + let s = this.#s.get(r) ?? 0; + this.#s.set(r, s + 1); + } + } + /** + * @param {boolean} blocking + * @param {Effect} effect + * @param {boolean} skip - whether to skip updates (because this is triggered by a stale reaction) + */ + decrement(t, r, n) { + let s = this.#i.get(r) ?? 0; + if (s === 1 ? this.#i.delete(r) : this.#i.set(r, s - 1), t) { + let i = this.#s.get(r) ?? 0; + i === 1 ? 
this.#s.delete(r) : this.#s.set(r, i - 1); + } + this.#v || n || (this.#v = !0, Pe(() => { + this.#v = !1, this.flush(); + })); + } + /** + * @param {Set} dirty_effects + * @param {Set} maybe_dirty_effects + */ + transfer_effects(t, r) { + for (const n of t) + this.#f.add(n); + for (const n of r) + this.#u.add(n); + t.clear(), r.clear(); + } + /** @param {(batch: Batch) => void} fn */ + oncommit(t) { + this.#e.add(t); + } + /** @param {(batch: Batch) => void} fn */ + ondiscard(t) { + this.#n.add(t); + } + /** @param {(batch: Batch) => void} fn */ + on_fork_commit(t) { + this.#t.add(t); + } + run_fork_commit_callbacks() { + for (const t of this.#t) t(this); + this.#t.clear(); + } + settled() { + return (this.#l ??= qr()).promise; + } + static ensure() { + if (S === null) { + const t = S = new Oe(); + jt || (Fe.add(S), pt || Pe(() => { + S === t && t.flush(); + })); + } + return S; + } + apply() { + { + ue = null; + return; + } + } + /** + * + * @param {Effect} effect + */ + schedule(t) { + if (Yt = t, t.b?.is_pending && t.f & (nt | Rt | Br) && !(t.f & Ce)) { + t.b.defer_effect(t); + return; + } + for (var r = t; r.parent !== null; ) { + r = r.parent; + var n = r.f; + if (et !== null && r === y && (A === null || !(A.f & H))) + return; + if (n & (Ne | de)) { + if (!(n & D)) + return; + r.f ^= D; + } + } + this.#r.push(r); + } +} +function Se(e) { + var t = pt; + pt = !0; + try { + for (var r; ; ) { + if (js(), S === null) + return ( + /** @type {T} */ + r + ); + S.flush(); + } + } finally { + pt = t; + } +} +function Vs() { + try { + Os(); + } catch (e) { + Ie(e, Yt); + } +} +let we = null; +function xr(e) { + var t = e.length; + if (t !== 0) { + for (var r = 0; r < t; ) { + var n = e[r++]; + if (!(n.f & (Z | V)) && $t(n) && (we = /* @__PURE__ */ new Set(), ot(n), n.deps === null && n.first === null && n.nodes === null && n.teardown === null && n.ac === null && kn(n), we?.size > 0)) { + Be.clear(); + for (const s of we) { + if (s.f & (Z | V)) continue; + const i = [s]; + let o = s.parent; + for (; o !== null; ) + we.has(o) && (we.delete(o), i.push(o)), o = o.parent; + for (let l = i.length - 1; l >= 0; l--) { + const f = i[l]; + f.f & (Z | V) || ot(f); + } + } + we.clear(); + } + } + we = null; + } +} +function sn(e, t, r, n) { + if (!r.has(e) && (r.add(e), e.reactions !== null)) + for (const s of e.reactions) { + const i = s.f; + i & H ? sn( + /** @type {Derived} */ + s, + t, + r, + n + ) : i & (bt | he) && !(i & z) && ir(s, t, n) && (R(s, z), lr( + /** @type {Effect} */ + s + )); + } +} +function ir(e, t, r) { + const n = r.get(e); + if (n !== void 0) return n; + if (e.deps !== null) + for (const s of e.deps) { + if (rt.call(t, s)) + return !0; + if (s.f & H && ir( + /** @type {Derived} */ + s, + t, + r + )) + return r.set( + /** @type {Derived} */ + s, + !0 + ), !0; + } + return r.set(e, !1), !1; +} +function lr(e) { + S.schedule(e); +} +function ln(e, t) { + if (!(e.f & de && e.f & D)) { + e.f & z ? 
t.d.push(e) : e.f & _e && t.m.push(e), R(e, D); + for (var r = e.first; r !== null; ) + ln(r, t), r = r.next; + } +} +function on(e) { + R(e, D); + for (var t = e.first; t !== null; ) + on(t), t = t.next; +} +function Ys(e) { + let t = 0, r = Ze(0), n; + return () => { + ur() && (b(r), dr(() => (t === 0 && (n = ft(() => e(() => _t(r)))), t += 1, () => { + Pe(() => { + t -= 1, t === 0 && (n?.(), n = void 0, _t(r)); + }); + }))); + }; +} +var Ws = Ke | Xe; +function Gs(e, t, r, n) { + new Ks(e, t, r, n); +} +class Ks { + /** @type {Boundary | null} */ + parent; + is_pending = !1; + /** + * API-level transformError transform function. Transforms errors before they reach the `failed` snippet. + * Inherited from parent boundary, or defaults to identity. + * @type {(error: unknown) => unknown} + */ + transform_error; + /** @type {TemplateNode} */ + #e; + /** @type {TemplateNode | null} */ + #n = N ? x : null; + /** @type {BoundaryProps} */ + #t; + /** @type {((anchor: Node) => void)} */ + #i; + /** @type {Effect} */ + #s; + /** @type {Effect | null} */ + #l = null; + /** @type {Effect | null} */ + #r = null; + /** @type {Effect | null} */ + #o = null; + /** @type {DocumentFragment | null} */ + #f = null; + #u = 0; + #a = 0; + #h = !1; + /** @type {Set} */ + #v = /* @__PURE__ */ new Set(); + /** @type {Set} */ + #d = /* @__PURE__ */ new Set(); + /** + * A source containing the number of pending async deriveds/expressions. + * Only created if `$effect.pending()` is used inside the boundary, + * otherwise updating the source results in needless `Batch.ensure()` + * calls followed by no-op flushes + * @type {Source | null} + */ + #c = null; + #b = Ys(() => (this.#c = Ze(this.#u), () => { + this.#c = null; + })); + /** + * @param {TemplateNode} node + * @param {BoundaryProps} props + * @param {((anchor: Node) => void)} children + * @param {((error: unknown) => unknown) | undefined} [transform_error] + */ + constructor(t, r, n, s) { + this.#e = t, this.#t = r, this.#i = (i) => { + var o = ( + /** @type {Effect} */ + y + ); + o.b = this, o.f |= Ht, n(i); + }, this.parent = /** @type {Effect} */ + y.b, this.transform_error = s ?? this.parent?.transform_error ?? ((i) => i), this.#s = vr(() => { + if (N) { + const i = ( + /** @type {Comment} */ + this.#n + ); + st(); + const o = i.data === rr; + if (i.data.startsWith($r)) { + const f = JSON.parse(i.data.slice($r.length)); + this.#g(f); + } else o ? 
this.#_() : this.#p(); + } else + this.#m(); + }, Ws), N && (this.#e = x); + } + #p() { + try { + this.#l = ee(() => this.#i(this.#e)); + } catch (t) { + this.error(t); + } + } + /** + * @param {unknown} error The deserialized error from the server's hydration comment + */ + #g(t) { + const r = this.#t.failed; + r && (this.#o = ee(() => { + r( + this.#e, + () => t, + () => () => { + } + ); + })); + } + #_() { + const t = this.#t.pending; + t && (this.is_pending = !0, this.#r = ee(() => t(this.#e)), Pe(() => { + var r = this.#f = document.createDocumentFragment(), n = ie(); + r.append(n), this.#l = this.#y(() => ee(() => this.#i(n))), this.#a === 0 && (this.#e.before(r), this.#f = null, Ue( + /** @type {Effect} */ + this.#r, + () => { + this.#r = null; + } + ), this.#w( + /** @type {Batch} */ + S + )); + })); + } + #m() { + try { + if (this.is_pending = this.has_pending_snippet(), this.#a = 0, this.#u = 0, this.#l = ee(() => { + this.#i(this.#e); + }), this.#a > 0) { + var t = this.#f = document.createDocumentFragment(); + gr(this.#l, t); + const r = ( + /** @type {(anchor: Node) => void} */ + this.#t.pending + ); + this.#r = ee(() => r(this.#e)); + } else + this.#w( + /** @type {Batch} */ + S + ); + } catch (r) { + this.error(r); + } + } + /** + * @param {Batch} batch + */ + #w(t) { + this.is_pending = !1, t.transfer_effects(this.#v, this.#d); + } + /** + * Defer an effect inside a pending boundary until the boundary resolves + * @param {Effect} effect + */ + defer_effect(t) { + en(t, this.#v, this.#d); + } + /** + * Returns `false` if the effect exists inside a boundary whose pending snippet is shown + * @returns {boolean} + */ + is_rendered() { + return !this.is_pending && (!this.parent || this.parent.is_rendered()); + } + has_pending_snippet() { + return !!this.#t.pending; + } + /** + * @template T + * @param {() => T} fn + */ + #y(t) { + var r = y, n = A, s = G; + ge(this.#s), oe(this.#s), it(this.#s.ctx); + try { + return Oe.ensure(), t(); + } catch (i) { + return Xr(i), null; + } finally { + ge(r), oe(n), it(s); + } + } + /** + * Updates the pending count associated with the currently visible pending snippet, + * if any, such that we can replace the snippet with content once work is done + * @param {1 | -1} d + * @param {Batch} batch + */ + #$(t, r) { + if (!this.has_pending_snippet()) { + this.parent && this.parent.#$(t, r); + return; + } + this.#a += t, this.#a === 0 && (this.#w(r), this.#r && Ue(this.#r, () => { + this.#r = null; + }), this.#f && (this.#e.before(this.#f), this.#f = null)); + } + /** + * Update the source that powers `$effect.pending()` inside this boundary, + * and controls when the current `pending` snippet (if any) is removed. + * Do not call from inside the class + * @param {1 | -1} d + * @param {Batch} batch + */ + update_pending_count(t, r) { + this.#$(t, r), this.#u += t, !(!this.#c || this.#h) && (this.#h = !0, Pe(() => { + this.#h = !1, this.#c && lt(this.#c, this.#u); + })); + } + get_effect_pending() { + return this.#b(), b( + /** @type {Source} */ + this.#c + ); + } + /** @param {unknown} error */ + error(t) { + if (!this.#t.onerror && !this.#t.failed) + throw t; + S?.is_fork ? 
(this.#l && S.skip_effect(this.#l), this.#r && S.skip_effect(this.#r), this.#o && S.skip_effect(this.#o), S.on_fork_commit(() => { + this.#k(t); + })) : this.#k(t); + } + /** + * @param {unknown} error + */ + #k(t) { + this.#l && (Y(this.#l), this.#l = null), this.#r && (Y(this.#r), this.#r = null), this.#o && (Y(this.#o), this.#o = null), N && (j( + /** @type {TemplateNode} */ + this.#n + ), Vr(), j(Ot())); + var r = this.#t.onerror; + let n = this.#t.failed; + var s = !1, i = !1; + const o = () => { + if (s) { + Fs(); + return; + } + s = !0, i && Ds(), this.#o !== null && Ue(this.#o, () => { + this.#o = null; + }), this.#y(() => { + this.#m(); + }); + }, l = (f) => { + try { + i = !0, r?.(f, o), i = !1; + } catch (a) { + Ie(a, this.#s && this.#s.parent); + } + n && (this.#o = this.#y(() => { + try { + return ee(() => { + var a = ( + /** @type {Effect} */ + y + ); + a.b = this, a.f |= Ht, n( + this.#e, + () => f, + () => o + ); + }); + } catch (a) { + return Ie( + a, + /** @type {Effect} */ + this.#s.parent + ), null; + } + })); + }; + Pe(() => { + var f; + try { + f = this.transform_error(t); + } catch (a) { + Ie(a, this.#s && this.#s.parent); + return; + } + f !== null && typeof f == "object" && typeof /** @type {any} */ + f.then == "function" ? f.then( + l, + /** @param {unknown} e */ + (a) => Ie(a, this.#s && this.#s.parent) + ) : l(f); + }); + } +} +function Js(e, t, r, n) { + const s = Pt; + var i = e.filter((d) => !d.settled); + if (r.length === 0 && i.length === 0) { + n(t.map(s)); + return; + } + var o = ( + /** @type {Effect} */ + y + ), l = Zs(), f = i.length === 1 ? i[0].promise : i.length > 1 ? Promise.all(i.map((d) => d.promise)) : null; + function a(d) { + l(); + try { + n(d); + } catch (v) { + o.f & Z || Ie(v, o); + } + Mt(); + } + if (r.length === 0) { + f.then(() => a(t.map(s))); + return; + } + var u = an(); + function c() { + Promise.all(r.map((d) => /* @__PURE__ */ Xs(d))).then((d) => a([...t.map(s), ...d])).catch((d) => Ie(d, o)).finally(() => u()); + } + f ? 
f.then(() => { + l(), c(), Mt(); + }) : c(); +} +function Zs() { + var e = ( + /** @type {Effect} */ + y + ), t = A, r = G, n = ( + /** @type {Batch} */ + S + ); + return function(i = !0) { + ge(e), oe(t), it(r), i && !(e.f & Z) && (n?.activate(), n?.apply()); + }; +} +function Mt(e = !0) { + ge(null), oe(null), it(null), e && S?.deactivate(); +} +function an() { + var e = ( + /** @type {Effect} */ + y + ), t = ( + /** @type {Boundary} */ + e.b + ), r = ( + /** @type {Batch} */ + S + ), n = t.is_rendered(); + return t.update_pending_count(1, r), r.increment(n, e), (s = !1) => { + t.update_pending_count(-1, r), r.decrement(n, e, s); + }; +} +// @__NO_SIDE_EFFECTS__ +function Pt(e) { + var t = H | z; + return y !== null && (y.f |= Xe), { + ctx: G, + deps: null, + effects: null, + equals: Wr, + f: t, + fn: e, + reactions: null, + rv: 0, + v: ( + /** @type {V} */ + P + ), + wv: 0, + parent: y, + ac: null + }; +} +// @__NO_SIDE_EFFECTS__ +function Xs(e, t, r) { + let n = ( + /** @type {Effect | null} */ + y + ); + n === null && xs(); + var s = ( + /** @type {Promise} */ + /** @type {unknown} */ + void 0 + ), i = Ze( + /** @type {V} */ + P + ), o = !A, l = /* @__PURE__ */ new Map(); + return li(() => { + var f = ( + /** @type {Effect} */ + y + ), a = qr(); + s = a.promise; + try { + Promise.resolve(e()).then(a.resolve, a.reject).finally(Mt); + } catch (v) { + a.reject(v), Mt(); + } + var u = ( + /** @type {Batch} */ + S + ); + if (o) { + if (f.f & Ce) + var c = an(); + if ( + /** @type {Boundary} */ + n.b.is_rendered() + ) + l.get(u)?.reject(ye), l.delete(u); + else { + for (const v of l.values()) + v.reject(ye); + l.clear(); + } + l.set(u, a); + } + const d = (v, h = void 0) => { + if (c) { + var p = h === ye; + c(p); + } + if (!(h === ye || f.f & Z)) { + if (u.activate(), h) + i.f |= De, lt(i, h); + else { + i.f & De && (i.f ^= De), lt(i, v); + for (const [_, $] of l) { + if (l.delete(_), _ === u) break; + $.reject(ye); + } + } + u.deactivate(); + } + }; + a.promise.then(d, (v) => d(null, v || "unknown")); + }), cr(() => { + for (const f of l.values()) + f.reject(ye); + }), new Promise((f) => { + function a(u) { + function c() { + u === s ? f(i) : a(s); + } + u.then(c, c); + } + a(s); + }); +} +// @__NO_SIDE_EFFECTS__ +function Le(e) { + const t = /* @__PURE__ */ Pt(e); + return Tn(t), t; +} +// @__NO_SIDE_EFFECTS__ +function fn(e) { + const t = /* @__PURE__ */ Pt(e); + return t.equals = Kr, t; +} +function Qs(e) { + var t = e.effects; + if (t !== null) { + e.effects = null; + for (var r = 0; r < t.length; r += 1) + Y( + /** @type {Effect} */ + t[r] + ); + } +} +function or(e) { + var t, r = y, n = e.parent; + if (!Me && n !== null && n.f & (Z | V)) + return Ps(), e.v; + ge(n); + try { + e.f &= ~Je, Qs(e), t = On(e); + } finally { + ge(r); + } + return t; +} +function un(e) { + var t = or(e); + if (!e.equals(t) && (e.wv = An(), (!S?.is_fork || e.deps === null) && (S !== null ? S.capture(e, t, !0) : e.v = t, e.deps === null))) { + R(e, D); + return; + } + Me || (ue !== null ? 
(ur() || S?.is_fork) && ue.set(e, t) : sr(e)); +} +function ei(e) { + if (e.effects !== null) + for (const t of e.effects) + (t.teardown || t.ac) && (t.teardown?.(), t.ac?.abort(ye), t.teardown = re, t.ac = null, mt(t, 0), pr(t)); +} +function cn(e) { + if (e.effects !== null) + for (const t of e.effects) + t.teardown && ot(t); +} +let Wt = /* @__PURE__ */ new Set(); +const Be = /* @__PURE__ */ new Map(); +let hn = !1; +function Ze(e, t) { + var r = { + f: 0, + // TODO ideally we could skip this altogether, but it causes type errors + v: e, + reactions: null, + equals: Wr, + rv: 0, + wv: 0 + }; + return r; +} +// @__NO_SIDE_EFFECTS__ +function me(e, t) { + const r = Ze(e); + return Tn(r), r; +} +// @__NO_SIDE_EFFECTS__ +function ar(e, t = !1, r = !0) { + const n = Ze(e); + return t || (n.equals = Kr), n; +} +function te(e, t, r = !1) { + A !== null && // since we are untracking the function inside `$inspect.with` we need to add this check + // to ensure we error if state is set inside an inspect effect + (!ce || A.f & Bt) && Jr() && A.f & (H | he | bt | Bt) && (le === null || !rt.call(le, e)) && Is(); + let n = r ? je(t) : t; + return lt(e, n, xt); +} +function lt(e, t, r = null) { + if (!e.equals(t)) { + Be.set(e, Me ? t : e.v); + var n = Oe.ensure(); + if (n.capture(e, t), e.f & H) { + const s = ( + /** @type {Derived} */ + e + ); + e.f & z && or(s), ue === null && sr(s); + } + e.wv = An(), dn(e, z, r), y !== null && y.f & D && !(y.f & (de | Ne)) && (Q === null ? ai([e]) : Q.push(e)), !n.is_fork && Wt.size > 0 && !hn && ti(); + } + return t; +} +function ti() { + hn = !1; + for (const e of Wt) + e.f & D && R(e, _e), $t(e) && ot(e); + Wt.clear(); +} +function _t(e) { + te(e, e.v + 1); +} +function dn(e, t, r) { + var n = e.reactions; + if (n !== null) + for (var s = n.length, i = 0; i < s; i++) { + var o = n[i], l = o.f, f = (l & z) === 0; + if (f && R(o, t), l & H) { + var a = ( + /** @type {Derived} */ + o + ); + ue?.delete(a), l & Je || (l & se && (y === null || !(y.f & Nt)) && (o.f |= Je), dn(a, _e, r)); + } else if (f) { + var u = ( + /** @type {Effect} */ + o + ); + l & he && we !== null && we.add(u), r !== null ? r.push(u) : lr(u); + } + } +} +function je(e) { + if (typeof e != "object" || e === null || vt in e) + return e; + const t = Hr(e); + if (t !== _s && t !== gs) + return e; + var r = /* @__PURE__ */ new Map(), n = jr(e), s = /* @__PURE__ */ me(0), i = Ve, o = (l) => { + if (Ve === i) + return l(); + var f = A, a = Ve; + oe(null), Ar(i); + var u = l(); + return oe(f), Ar(a), u; + }; + return n && r.set("length", /* @__PURE__ */ me( + /** @type {any[]} */ + e.length + )), new Proxy( + /** @type {any} */ + e, + { + defineProperty(l, f, a) { + (!("value" in a) || a.configurable === !1 || a.enumerable === !1 || a.writable === !1) && Rs(); + var u = r.get(f); + return u === void 0 ? o(() => { + var c = /* @__PURE__ */ me(a.value); + return r.set(f, c), c; + }) : te(u, a.value, !0), !0; + }, + deleteProperty(l, f) { + var a = r.get(f); + if (a === void 0) { + if (f in l) { + const u = o(() => /* @__PURE__ */ me(P)); + r.set(f, u), _t(s); + } + } else + te(a, P), _t(s); + return !0; + }, + get(l, f, a) { + if (f === vt) + return e; + var u = r.get(f), c = f in l; + if (u === void 0 && (!c || qe(l, f)?.writable) && (u = o(() => { + var v = je(c ? l[f] : P), h = /* @__PURE__ */ me(v); + return h; + }), r.set(f, u)), u !== void 0) { + var d = b(u); + return d === P ? 
void 0 : d; + } + return Reflect.get(l, f, a); + }, + getOwnPropertyDescriptor(l, f) { + var a = Reflect.getOwnPropertyDescriptor(l, f); + if (a && "value" in a) { + var u = r.get(f); + u && (a.value = b(u)); + } else if (a === void 0) { + var c = r.get(f), d = c?.v; + if (c !== void 0 && d !== P) + return { + enumerable: !0, + configurable: !0, + value: d, + writable: !0 + }; + } + return a; + }, + has(l, f) { + if (f === vt) + return !0; + var a = r.get(f), u = a !== void 0 && a.v !== P || Reflect.has(l, f); + if (a !== void 0 || y !== null && (!u || qe(l, f)?.writable)) { + a === void 0 && (a = o(() => { + var d = u ? je(l[f]) : P, v = /* @__PURE__ */ me(d); + return v; + }), r.set(f, a)); + var c = b(a); + if (c === P) + return !1; + } + return u; + }, + set(l, f, a, u) { + var c = r.get(f), d = f in l; + if (n && f === "length") + for (var v = a; v < /** @type {Source} */ + c.v; v += 1) { + var h = r.get(v + ""); + h !== void 0 ? te(h, P) : v in l && (h = o(() => /* @__PURE__ */ me(P)), r.set(v + "", h)); + } + if (c === void 0) + (!d || qe(l, f)?.writable) && (c = o(() => /* @__PURE__ */ me(void 0)), te(c, je(a)), r.set(f, c)); + else { + d = c.v !== P; + var p = o(() => je(a)); + te(c, p); + } + var _ = Reflect.getOwnPropertyDescriptor(l, f); + if (_?.set && _.set.call(u, a), !d) { + if (n && typeof f == "string") { + var $ = ( + /** @type {Source} */ + r.get("length") + ), g = Number(f); + Number.isInteger(g) && g >= $.v && te($, g + 1); + } + _t(s); + } + return !0; + }, + ownKeys(l) { + b(s); + var f = Reflect.ownKeys(l).filter((c) => { + var d = r.get(c); + return d === void 0 || d.v !== P; + }); + for (var [a, u] of r) + u.v !== P && !(a in l) && f.push(a); + return f; + }, + setPrototypeOf() { + Ls(); + } + } + ); +} +var Tr, vn, pn, _n; +function Gt() { + if (Tr === void 0) { + Tr = window, vn = /Firefox/.test(navigator.userAgent); + var e = Element.prototype, t = Node.prototype, r = Text.prototype; + pn = qe(t, "firstChild").get, _n = qe(t, "nextSibling").get, kr(e) && (e.__click = void 0, e.__className = void 0, e.__attributes = null, e.__style = void 0, e.__e = void 0), kr(r) && (r.__t = void 0); + } +} +function ie(e = "") { + return document.createTextNode(e); +} +// @__NO_SIDE_EFFECTS__ +function ne(e) { + return ( + /** @type {TemplateNode | null} */ + pn.call(e) + ); +} +// @__NO_SIDE_EFFECTS__ +function ve(e) { + return ( + /** @type {TemplateNode | null} */ + _n.call(e) + ); +} +function B(e, t) { + if (!N) + return /* @__PURE__ */ ne(e); + var r = /* @__PURE__ */ ne(x); + if (r === null) + r = x.appendChild(ie()); + else if (t && r.nodeType !== Lt) { + var n = ie(); + return r?.before(n), j(n), n; + } + return t && fr( + /** @type {Text} */ + r + ), j(r), r; +} +function Kt(e, t = !1) { + if (!N) { + var r = /* @__PURE__ */ ne(e); + return r instanceof Comment && r.data === "" ? /* @__PURE__ */ ve(r) : r; + } + if (t) { + if (x?.nodeType !== Lt) { + var n = ie(); + return x?.before(n), j(n), n; + } + fr( + /** @type {Text} */ + x + ); + } + return x; +} +function $e(e, t = 1, r = !1) { + let n = N ? x : e; + for (var s; t--; ) + s = n, n = /** @type {TemplateNode} */ + /* @__PURE__ */ ve(n); + if (!N) + return n; + if (r) { + if (n?.nodeType !== Lt) { + var i = ie(); + return n === null ? s?.after(i) : n.before(i), j(i), i; + } + fr( + /** @type {Text} */ + n + ); + } + return j(n), n; +} +function gn(e) { + e.textContent = ""; +} +function bn() { + return !1; +} +function Ft(e, t, r) { + return ( + /** @type {T extends keyof HTMLElementTagNameMap ? 
HTMLElementTagNameMap[T] : Element} */ + document.createElementNS(t ?? zr, e, void 0) + ); +} +function fr(e) { + if ( + /** @type {string} */ + e.nodeValue.length < 65536 + ) + return; + let t = e.nextSibling; + for (; t !== null && t.nodeType === Lt; ) + t.remove(), e.nodeValue += /** @type {string} */ + t.nodeValue, t = e.nextSibling; +} +function yt(e) { + var t = A, r = y; + oe(null), ge(null); + try { + return e(); + } finally { + oe(t), ge(r); + } +} +function ri(e) { + y === null && (A === null && Ns(), As()), Me && Ss(); +} +function ni(e, t) { + var r = t.last; + r === null ? t.last = t.first = e : (r.next = e, e.prev = r, t.last = e); +} +function be(e, t) { + var r = y; + r !== null && r.f & V && (e |= V); + var n = { + ctx: G, + deps: null, + nodes: null, + f: e | z | se, + first: null, + fn: t, + last: null, + next: null, + parent: r, + b: r && r.b, + prev: null, + teardown: null, + wv: 0, + ac: null + }; + S?.register_created_effect(n); + var s = n; + if (e & nt) + et !== null ? et.push(n) : Oe.ensure().schedule(n); + else if (t !== null) { + try { + ot(n); + } catch (o) { + throw Y(n), o; + } + s.deps === null && s.teardown === null && s.nodes === null && s.first === s.last && // either `null`, or a singular child + !(s.f & Xe) && (s = s.first, e & he && e & Ke && s !== null && (s.f |= Ke)); + } + if (s !== null && (s.parent = r, r !== null && ni(s, r), A !== null && A.f & H && !(e & Ne))) { + var i = ( + /** @type {Derived} */ + A + ); + (i.effects ??= []).push(s); + } + return n; +} +function ur() { + return A !== null && !ce; +} +function cr(e) { + const t = be(Rt, null); + return R(t, D), t.teardown = e, t; +} +function mn(e) { + ri(); + var t = ( + /** @type {Effect} */ + y.f + ), r = !A && (t & de) !== 0 && (t & Ce) === 0; + if (r) { + var n = ( + /** @type {ComponentContext} */ + G + ); + (n.e ??= []).push(e); + } else + return wn(e); +} +function wn(e) { + return be(nt | $s, e); +} +function si(e) { + Oe.ensure(); + const t = be(Ne | Xe, e); + return () => { + Y(t); + }; +} +function ii(e) { + Oe.ensure(); + const t = be(Ne | Xe, e); + return (r = {}) => new Promise((n) => { + r.outro ? Ue(t, () => { + Y(t), n(void 0); + }) : (Y(t), n(void 0)); + }); +} +function hr(e) { + return be(nt, e); +} +function li(e) { + return be(bt | Xe, e); +} +function dr(e, t = 0) { + return be(Rt | t, e); +} +function Te(e, t = [], r = [], n = []) { + Js(n, t, r, (s) => { + be(Rt, () => e(...s.map(b))); + }); +} +function vr(e, t = 0) { + var r = be(he | t, e); + return r; +} +function ee(e) { + return be(de | Xe, e); +} +function yn(e) { + var t = e.teardown; + if (t !== null) { + const r = Me, n = A; + Sr(!0), oe(null); + try { + t.call(null); + } finally { + Sr(r), oe(n); + } + } +} +function pr(e, t = !1) { + var r = e.first; + for (e.first = e.last = null; r !== null; ) { + const s = r.ac; + s !== null && yt(() => { + s.abort(ye); + }); + var n = r.next; + r.f & Ne ? 
r.parent = null : Y(r, t), r = n; + } +} +function oi(e) { + for (var t = e.first; t !== null; ) { + var r = t.next; + t.f & de || Y(t), t = r; + } +} +function Y(e, t = !0) { + var r = !1; + (t || e.f & ys) && e.nodes !== null && e.nodes.end !== null && ($n( + e.nodes.start, + /** @type {TemplateNode} */ + e.nodes.end + ), r = !0), R(e, qt), pr(e, t && !r), mt(e, 0); + var n = e.nodes && e.nodes.t; + if (n !== null) + for (const i of n) + i.stop(); + yn(e), e.f ^= qt, e.f |= Z; + var s = e.parent; + s !== null && s.first !== null && kn(e), e.next = e.prev = e.teardown = e.ctx = e.deps = e.fn = e.nodes = e.ac = e.b = null; +} +function $n(e, t) { + for (; e !== null; ) { + var r = e === t ? null : /* @__PURE__ */ ve(e); + e.remove(), e = r; + } +} +function kn(e) { + var t = e.parent, r = e.prev, n = e.next; + r !== null && (r.next = n), n !== null && (n.prev = r), t !== null && (t.first === e && (t.first = n), t.last === e && (t.last = r)); +} +function Ue(e, t, r = !0) { + var n = []; + En(e, n, !0); + var s = () => { + r && Y(e), t && t(); + }, i = n.length; + if (i > 0) { + var o = () => --i || s(); + for (var l of n) + l.out(o); + } else + s(); +} +function En(e, t, r) { + if (!(e.f & V)) { + e.f ^= V; + var n = e.nodes && e.nodes.t; + if (n !== null) + for (const l of n) + (l.is_global || r) && t.push(l); + for (var s = e.first; s !== null; ) { + var i = s.next; + if (!(s.f & Ne)) { + var o = (s.f & Ke) !== 0 || // If this is a branch effect without a block effect parent, + // it means the parent block effect was pruned. In that case, + // transparency information was transferred to the branch effect. + (s.f & de) !== 0 && (e.f & he) !== 0; + En(s, t, o ? r : !1); + } + s = i; + } + } +} +function _r(e) { + xn(e, !0); +} +function xn(e, t) { + if (e.f & V) { + e.f ^= V, e.f & D || (R(e, z), Oe.ensure().schedule(e)); + for (var r = e.first; r !== null; ) { + var n = r.next, s = (r.f & Ke) !== 0 || (r.f & de) !== 0; + xn(r, s ? t : !1), r = n; + } + var i = e.nodes && e.nodes.t; + if (i !== null) + for (const o of i) + (o.is_global || t) && o.in(); + } +} +function gr(e, t) { + if (e.nodes) + for (var r = e.nodes.start, n = e.nodes.end; r !== null; ) { + var s = r === n ? null : /* @__PURE__ */ ve(r); + t.append(r), r = s; + } +} +let Tt = !1, Me = !1; +function Sr(e) { + Me = e; +} +let A = null, ce = !1; +function oe(e) { + A = e; +} +let y = null; +function ge(e) { + y = e; +} +let le = null; +function Tn(e) { + A !== null && (le === null ? le = [e] : le.push(e)); +} +let W = null, J = 0, Q = null; +function ai(e) { + Q = e; +} +let Sn = 1, He = 0, Ve = He; +function Ar(e) { + Ve = e; +} +function An() { + return ++Sn; +} +function $t(e) { + var t = e.f; + if (t & z) + return !0; + if (t & H && (e.f &= ~Je), t & _e) { + for (var r = ( + /** @type {Value[]} */ + e.deps + ), n = r.length, s = 0; s < n; s++) { + var i = r[s]; + if ($t( + /** @type {Derived} */ + i + ) && un( + /** @type {Derived} */ + i + ), i.wv > e.wv) + return !0; + } + t & se && // During time traveling we don't want to reset the status so that + // traversal of the graph in the other batches still happens + ue === null && R(e, D); + } + return !1; +} +function Nn(e, t, r = !0) { + var n = e.reactions; + if (n !== null && !(le !== null && rt.call(le, e))) + for (var s = 0; s < n.length; s++) { + var i = n[s]; + i.f & H ? Nn( + /** @type {Derived} */ + i, + t, + !1 + ) : t === i && (r ? 
R(i, z) : i.f & D && R(i, _e), lr( + /** @type {Effect} */ + i + )); + } +} +function On(e) { + var t = W, r = J, n = Q, s = A, i = le, o = G, l = ce, f = Ve, a = e.f; + W = /** @type {null | Value[]} */ + null, J = 0, Q = null, A = a & (de | Ne) ? null : e, le = null, it(e.ctx), ce = !1, Ve = ++He, e.ac !== null && (yt(() => { + e.ac.abort(ye); + }), e.ac = null); + try { + e.f |= Nt; + var u = ( + /** @type {Function} */ + e.fn + ), c = u(); + e.f |= Ce; + var d = e.deps, v = S?.is_fork; + if (W !== null) { + var h; + if (v || mt(e, J), d !== null && J > 0) + for (d.length = J + W.length, h = 0; h < W.length; h++) + d[J + h] = W[h]; + else + e.deps = d = W; + if (ur() && e.f & se) + for (h = J; h < d.length; h++) + (d[h].reactions ??= []).push(e); + } else !v && d !== null && J < d.length && (mt(e, J), d.length = J); + if (Jr() && Q !== null && !ce && d !== null && !(e.f & (H | _e | z))) + for (h = 0; h < /** @type {Source[]} */ + Q.length; h++) + Nn( + Q[h], + /** @type {Effect} */ + e + ); + if (s !== null && s !== e) { + if (He++, s.deps !== null) + for (let p = 0; p < r; p += 1) + s.deps[p].rv = He; + if (t !== null) + for (const p of t) + p.rv = He; + Q !== null && (n === null ? n = Q : n.push(.../** @type {Source[]} */ + Q)); + } + return e.f & De && (e.f ^= De), c; + } catch (p) { + return Xr(p); + } finally { + e.f ^= Nt, W = t, J = r, Q = n, A = s, le = i, it(o), ce = l, Ve = f; + } +} +function fi(e, t) { + let r = t.reactions; + if (r !== null) { + var n = vs.call(r, e); + if (n !== -1) { + var s = r.length - 1; + s === 0 ? r = t.reactions = null : (r[n] = r[s], r.pop()); + } + } + if (r === null && t.f & H && // Destroying a child effect while updating a parent effect can cause a dependency to appear + // to be unused, when in fact it is used by the currently-updating parent. Checking `new_deps` + // allows us to skip the expensive work of disconnecting and immediately reconnecting it + (W === null || !rt.call(W, t))) { + var i = ( + /** @type {Derived} */ + t + ); + i.f & se && (i.f ^= se, i.f &= ~Je), i.v !== P && sr(i), ei(i), mt(i, 0); + } +} +function mt(e, t) { + var r = e.deps; + if (r !== null) + for (var n = t; n < r.length; n++) + fi(e, r[n]); +} +function ot(e) { + var t = e.f; + if (!(t & Z)) { + R(e, D); + var r = y, n = Tt; + y = e, Tt = !0; + try { + t & (he | Br) ? oi(e) : pr(e), yn(e); + var s = On(e); + e.teardown = typeof s == "function" ? s : null, e.wv = Sn; + var i; + ds && zs && e.f & z && e.deps; + } finally { + Tt = n, y = r; + } + } +} +async function ui() { + await Promise.resolve(), Se(); +} +function b(e) { + var t = e.f, r = (t & H) !== 0; + if (A !== null && !ce) { + var n = y !== null && (y.f & Z) !== 0; + if (!n && (le === null || !rt.call(le, e))) { + var s = A.deps; + if (A.f & Nt) + e.rv < He && (e.rv = He, W === null && s !== null && s[J] === e ? J++ : W === null ? W = [e] : W.push(e)); + else { + (A.deps ??= []).push(e); + var i = e.reactions; + i === null ? 
e.reactions = [A] : rt.call(i, A) || i.push(A); + } + } + } + if (Me && Be.has(e)) + return Be.get(e); + if (r) { + var o = ( + /** @type {Derived} */ + e + ); + if (Me) { + var l = o.v; + return (!(o.f & D) && o.reactions !== null || Cn(o)) && (l = or(o)), Be.set(o, l), l; + } + var f = (o.f & se) === 0 && !ce && A !== null && (Tt || (A.f & se) !== 0), a = (o.f & Ce) === 0; + $t(o) && (f && (o.f |= se), un(o)), f && !a && (cn(o), Mn(o)); + } + if (ue?.has(e)) + return ue.get(e); + if (e.f & De) + throw e.v; + return e.v; +} +function Mn(e) { + if (e.f |= se, e.deps !== null) + for (const t of e.deps) + (t.reactions ??= []).push(e), t.f & H && !(t.f & se) && (cn( + /** @type {Derived} */ + t + ), Mn( + /** @type {Derived} */ + t + )); +} +function Cn(e) { + if (e.v === P) return !0; + if (e.deps === null) return !1; + for (const t of e.deps) + if (Be.has(t) || t.f & H && Cn( + /** @type {Derived} */ + t + )) + return !0; + return !1; +} +function ft(e) { + var t = ce; + try { + return ce = !0, e(); + } finally { + ce = t; + } +} +const ht = Symbol("events"), Rn = /* @__PURE__ */ new Set(), Jt = /* @__PURE__ */ new Set(); +function ci(e, t, r, n = {}) { + function s(i) { + if (n.capture || Zt.call(t, i), !i.cancelBubble) + return yt(() => r?.call(this, i)); + } + return e.startsWith("pointer") || e.startsWith("touch") || e === "wheel" ? Pe(() => { + t.addEventListener(e, s, n); + }) : t.addEventListener(e, s, n), s; +} +function hi(e, t, r, n, s) { + var i = { capture: n, passive: s }, o = ci(e, t, r, i); + (t === document.body || // @ts-ignore + t === window || // @ts-ignore + t === document || // Firefox has quirky behavior, it can happen that we still get "canplay" events when the element is already removed + t instanceof HTMLMediaElement) && cr(() => { + t.removeEventListener(e, o, i); + }); +} +function Nr(e, t, r) { + (t[ht] ??= {})[e] = r; +} +function di(e) { + for (var t = 0; t < e.length; t++) + Rn.add(e[t]); + for (var r of Jt) + r(e); +} +let Or = null; +function Zt(e) { + var t = this, r = ( + /** @type {Node} */ + t.ownerDocument + ), n = e.type, s = e.composedPath?.() || [], i = ( + /** @type {null | Element} */ + s[0] || e.target + ); + Or = e; + var o = 0, l = Or === e && e[ht]; + if (l) { + var f = s.indexOf(l); + if (f !== -1 && (t === document || t === /** @type {any} */ + window)) { + e[ht] = t; + return; + } + var a = s.indexOf(t); + if (a === -1) + return; + f <= a && (o = f); + } + if (i = /** @type {Element} */ + s[o] || e.target, i !== t) { + gt(e, "currentTarget", { + configurable: !0, + get() { + return i || r; + } + }); + var u = A, c = y; + oe(null), ge(null); + try { + for (var d, v = []; i !== null; ) { + var h = i.assignedSlot || i.parentNode || /** @type {any} */ + i.host || null; + try { + var p = i[ht]?.[n]; + p != null && (!/** @type {any} */ + i.disabled || // DOM could've been updated already by the time this is reached, so we check this as well + // -> the target could not have been disabled because it emits the event in the first place + e.target === i) && p.call(i, e); + } catch (_) { + d ? 
v.push(_) : d = _; + } + if (e.cancelBubble || h === t || h === null) + break; + i = h; + } + if (d) { + for (let _ of v) + queueMicrotask(() => { + throw _; + }); + throw d; + } + } finally { + e[ht] = t, delete e.currentTarget, oe(u), ge(c); + } + } +} +const vi = ( + // We gotta write it like this because after downleveling the pure comment may end up in the wrong location + globalThis?.window?.trustedTypes && /* @__PURE__ */ globalThis.window.trustedTypes.createPolicy("svelte-trusted-html", { + /** @param {string} html */ + createHTML: (e) => e + }) +); +function pi(e) { + return ( + /** @type {string} */ + vi?.createHTML(e) ?? e + ); +} +function _i(e) { + var t = Ft("template"); + return t.innerHTML = pi(e.replaceAll("<!>", "<!---->")), t.content; +} +function Ae(e, t) { + var r = ( + /** @type {Effect} */ + y + ); + r.nodes === null && (r.nodes = { start: e, end: t, a: null, t: null }); +} +// @__NO_SIDE_EFFECTS__ +function ae(e, t) { + var r = (t & fs) !== 0, n = (t & us) !== 0, s, i = !e.startsWith("<!>"); + return () => { + if (N) + return Ae(x, null), x; + s === void 0 && (s = _i(i ? e : "<!>" + e), r || (s = /** @type {TemplateNode} */ + /* @__PURE__ */ ne(s))); + var o = ( + /** @type {TemplateNode} */ + n || vn ? document.importNode(s, !0) : s.cloneNode(!0) + ); + if (r) { + var l = ( + /** @type {TemplateNode} */ + /* @__PURE__ */ ne(o) + ), f = ( + /** @type {TemplateNode} */ + o.lastChild + ); + Ae(l, f); + } else + Ae(o, o); + return o; + }; +} +function Ln() { + if (N) + return Ae(x, null), x; + var e = document.createDocumentFragment(), t = document.createComment(""), r = ie(); + return e.append(t, r), Ae(t, r), e; +} +function U(e, t) { + if (N) { + var r = ( + /** @type {Effect & { nodes: EffectNodes }} */ + y + ); + (!(r.f & Ce) || r.nodes.end === null) && (r.nodes.end = x), st(); + return; + } + e !== null && e.before( + /** @type {Node} */ + t + ); +} +const gi = ["touchstart", "touchmove"]; +function bi(e) { + return gi.includes(e); +} +let Xt = !0; +function fe(e, t) { + var r = t == null ? "" : typeof t == "object" ? `${t}` : t; + r !== (e.__t ??= e.nodeValue) && (e.__t = r, e.nodeValue = `${r}`); +} +function In(e, t) { + return Dn(e, t); +} +function mi(e, t) { + Gt(), t.intro = t.intro ?? !1; + const r = t.target, n = N, s = x; + try { + for (var i = /* @__PURE__ */ ne(r); i && (i.nodeType !== at || /** @type {Comment} */ + i.data !== Fr); ) + i = /* @__PURE__ */ ve(i); + if (!i) + throw Ge; + xe(!0), j( + /** @type {Comment} */ + i + ); + const o = Dn(e, { ...t, anchor: i }); + return xe(!1), /** @type {Exports} */ + o; + } catch (o) { + if (o instanceof Error && o.message.split(` +`).some((l) => l.startsWith("https://svelte.dev/e/"))) + throw o; + return o !== Ge && console.warn("Failed to hydrate: ", o), t.recover === !1 && Ms(), Gt(), gn(r), xe(!1), In(e, t); + } finally { + xe(n), j(s); + } +} +const Et = /* @__PURE__ */ new Map(); +function Dn(e, { target: t, anchor: r, props: n = {}, events: s, context: i, intro: o = !0, transformError: l }) { + Gt(); + var f = void 0, a = ii(() => { + var u = r ??
t.appendChild(ie()); + Gs( + /** @type {TemplateNode} */ + u, + { + pending: () => { + } + }, + (v) => { + It({}); + var h = ( + /** @type {ComponentContext} */ + G + ); + if (i && (h.c = i), s && (n.$$events = s), N && Ae( + /** @type {TemplateNode} */ + v, + null + ), Xt = o, f = e(v, n) || {}, Xt = !0, N && (y.nodes.end = x, x === null || x.nodeType !== at || /** @type {Comment} */ + x.data !== nr)) + throw wt(), Ge; + Dt(); + }, + l + ); + var c = /* @__PURE__ */ new Set(), d = (v) => { + for (var h = 0; h < v.length; h++) { + var p = v[h]; + if (!c.has(p)) { + c.add(p); + var _ = bi(p); + for (const C of [t, document]) { + var $ = Et.get(C); + $ === void 0 && ($ = /* @__PURE__ */ new Map(), Et.set(C, $)); + var g = $.get(p); + g === void 0 ? (C.addEventListener(p, Zt, { passive: _ }), $.set(p, 1)) : $.set(p, g + 1); + } + } + } + }; + return d(Ct(Rn)), Jt.add(d), () => { + for (var v of c) + for (const _ of [t, document]) { + var h = ( + /** @type {Map} */ + Et.get(_) + ), p = ( + /** @type {number} */ + h.get(v) + ); + --p == 0 ? (_.removeEventListener(v, Zt), h.delete(v), h.size === 0 && Et.delete(_)) : h.set(v, p); + } + Jt.delete(d), u !== r && u.parentNode?.removeChild(u); + }; + }); + return Qt.set(f, a), f; +} +let Qt = /* @__PURE__ */ new WeakMap(); +function wi(e, t) { + const r = Qt.get(e); + return r ? (Qt.delete(e), r(t)) : Promise.resolve(); +} +class yi { + /** @type {TemplateNode} */ + anchor; + /** @type {Map} */ + #e = /* @__PURE__ */ new Map(); + /** + * Map of keys to effects that are currently rendered in the DOM. + * These effects are visible and actively part of the document tree. + * Example: + * ``` + * {#if condition} + * foo + * {:else} + * bar + * {/if} + * ``` + * Can result in the entries `true->Effect` and `false->Effect` + * @type {Map} + */ + #n = /* @__PURE__ */ new Map(); + /** + * Similar to #onscreen with respect to the keys, but contains branches that are not yet + * in the DOM, because their insertion is deferred. + * @type {Map} + */ + #t = /* @__PURE__ */ new Map(); + /** + * Keys of effects that are currently outroing + * @type {Set} + */ + #i = /* @__PURE__ */ new Set(); + /** + * Whether to pause (i.e. outro) on change, or destroy immediately. + * This is necessary for `` + */ + #s = !0; + /** + * @param {TemplateNode} anchor + * @param {boolean} transition + */ + constructor(t, r = !0) { + this.anchor = t, this.#s = r; + } + /** + * @param {Batch} batch + */ + #l = (t) => { + if (this.#e.has(t)) { + var r = ( + /** @type {Key} */ + this.#e.get(t) + ), n = this.#n.get(r); + if (n) + _r(n), this.#i.delete(r); + else { + var s = this.#t.get(r); + s && (this.#n.set(r, s.effect), this.#t.delete(r), s.fragment.lastChild.remove(), this.anchor.before(s.fragment), n = s.effect); + } + for (const [i, o] of this.#e) { + if (this.#e.delete(i), i === t) + break; + const l = this.#t.get(o); + l && (Y(l.effect), this.#t.delete(o)); + } + for (const [i, o] of this.#n) { + if (i === r || this.#i.has(i)) continue; + const l = () => { + if (Array.from(this.#e.values()).includes(i)) { + var a = document.createDocumentFragment(); + gr(o, a), a.append(ie()), this.#t.set(i, { effect: o, fragment: a }); + } else + Y(o); + this.#i.delete(i), this.#n.delete(i); + }; + this.#s || !n ? 
(this.#i.add(i), Ue(o, l, !1)) : l(); + } + } + }; + /** + * @param {Batch} batch + */ + #r = (t) => { + this.#e.delete(t); + const r = Array.from(this.#e.values()); + for (const [n, s] of this.#t) + r.includes(n) || (Y(s.effect), this.#t.delete(n)); + }; + /** + * + * @param {any} key + * @param {null | ((target: TemplateNode) => void)} fn + */ + ensure(t, r) { + var n = ( + /** @type {Batch} */ + S + ), s = bn(); + if (r && !this.#n.has(t) && !this.#t.has(t)) + if (s) { + var i = document.createDocumentFragment(), o = ie(); + i.append(o), this.#t.set(t, { + effect: ee(() => r(o)), + fragment: i + }); + } else + this.#n.set( + t, + ee(() => r(this.anchor)) + ); + if (this.#e.set(n, t), s) { + for (const [l, f] of this.#n) + l === t ? n.unskip_effect(f) : n.skip_effect(f); + for (const [l, f] of this.#t) + l === t ? n.unskip_effect(f.effect) : n.skip_effect(f.effect); + n.oncommit(this.#l), n.ondiscard(this.#r); + } else + N && (this.anchor = x), this.#l(n); + } +} +function $i(e) { + G === null && Es(), mn(() => { + const t = ft(e); + if (typeof t == "function") return ( + /** @type {() => void} */ + t + ); + }); +} +function Ye(e, t, r = !1) { + var n; + N && (n = x, st()); + var s = new yi(e), i = r ? Ke : 0; + function o(l, f) { + if (N) { + var a = Yr( + /** @type {TemplateNode} */ + n + ); + if (l !== parseInt(a.substring(1))) { + var u = Ot(); + j(u), s.anchor = u, xe(!1), s.ensure(l, f), xe(!0); + return; + } + } + s.ensure(l, f); + } + vr(() => { + var l = !1; + t((f, a = 0) => { + l = !0, o(a, f); + }), l || o(-1, null); + }, i); +} +function ki(e, t) { + return t; +} +function Ei(e, t, r) { + for (var n = [], s = t.length, i, o = t.length, l = 0; l < s; l++) { + let c = t[l]; + Ue( + c, + () => { + if (i) { + if (i.pending.delete(c), i.done.add(c), i.pending.size === 0) { + var d = ( + /** @type {Set} */ + e.outrogroups + ); + er(e, Ct(i.done)), d.delete(i), d.size === 0 && (e.outrogroups = null); + } + } else + o -= 1; + }, + !1 + ); + } + if (o === 0) { + var f = n.length === 0 && r !== null; + if (f) { + var a = ( + /** @type {Element} */ + r + ), u = ( + /** @type {Element} */ + a.parentNode + ); + gn(u), u.append(a), e.items.clear(); + } + er(e, t, !f); + } else + i = { + pending: new Set(t), + done: /* @__PURE__ */ new Set() + }, (e.outrogroups ??= /* @__PURE__ */ new Set()).add(i); +} +function er(e, t, r = !0) { + var n; + if (e.pending.size > 0) { + n = /* @__PURE__ */ new Set(); + for (const o of e.pending.values()) + for (const l of o) + n.add( + /** @type {EachItem} */ + e.items.get(l).e + ); + } + for (var s = 0; s < t.length; s++) { + var i = t[s]; + if (n?.has(i)) { + i.f |= Ee; + const o = document.createDocumentFragment(); + gr(i, o); + } else + Y(t[s], r); + } +} +var Mr; +function Pn(e, t, r, n, s, i = null) { + var o = e, l = /* @__PURE__ */ new Map(); + { + var f = ( + /** @type {Element} */ + e + ); + o = N ? j(/* @__PURE__ */ ne(f)) : f.appendChild(ie()); + } + N && st(); + var a = null, u = /* @__PURE__ */ fn(() => { + var g = r(); + return jr(g) ? g : g == null ? [] : Ct(g); + }), c, d = /* @__PURE__ */ new Map(), v = !0; + function h(g) { + $.effect.f & Z || ($.pending.delete(g), $.fallback = a, xi($, c, o, t, n), a !== null && (c.length === 0 ? a.f & Ee ? 
(a.f ^= Ee, dt(a, null, o)) : _r(a) : Ue(a, () => { + a = null; + }))); + } + function p(g) { + $.pending.delete(g); + } + var _ = vr(() => { + c = /** @type {V[]} */ + b(u); + var g = c.length; + let C = !1; + if (N) { + var w = Yr(o) === rr; + w !== (g === 0) && (o = Ot(), j(o), xe(!1), C = !0); + } + for (var m = /* @__PURE__ */ new Set(), T = ( + /** @type {Batch} */ + S + ), M = bn(), E = 0; E < g; E += 1) { + N && x.nodeType === at && /** @type {Comment} */ + x.data === nr && (o = /** @type {Comment} */ + x, C = !0, xe(!1)); + var L = c[E], q = n(L, E), O = v ? null : l.get(q); + O ? (O.v && lt(O.v, L), O.i && lt(O.i, E), M && T.unskip_effect(O.e)) : (O = Ti( + l, + v ? o : Mr ??= ie(), + L, + q, + E, + s, + t, + r + ), v || (O.e.f |= Ee), l.set(q, O)), m.add(q); + } + if (g === 0 && i && !a && (v ? a = ee(() => i(o)) : (a = ee(() => i(Mr ??= ie())), a.f |= Ee)), g > m.size && Ts(), N && g > 0 && j(Ot()), !v) + if (d.set(T, m), M) { + for (const [k, I] of l) + m.has(k) || T.skip_effect(I.e); + T.oncommit(h), T.ondiscard(p); + } else + h(T); + C && xe(!0), b(u); + }), $ = { effect: _, items: l, pending: d, outrogroups: null, fallback: a }; + v = !1, N && (o = x); +} +function ct(e) { + for (; e !== null && !(e.f & de); ) + e = e.next; + return e; +} +function xi(e, t, r, n, s) { + var i = t.length, o = e.items, l = ct(e.effect.first), f, a = null, u = [], c = [], d, v, h, p; + for (p = 0; p < i; p += 1) { + if (d = t[p], v = s(d, p), h = /** @type {EachItem} */ + o.get(v).e, e.outrogroups !== null) + for (const E of e.outrogroups) + E.pending.delete(h), E.done.delete(h); + if (h.f & V && _r(h), h.f & Ee) + if (h.f ^= Ee, h === l) + dt(h, null, r); + else { + var _ = a ? a.next : l; + h === e.effect.last && (e.effect.last = h.prev), h.prev && (h.prev.next = h.next), h.next && (h.next.prev = h.prev), Re(e, a, h), Re(e, h, _), dt(h, _, r), a = h, u = [], c = [], l = ct(a.next); + continue; + } + if (h !== l) { + if (f !== void 0 && f.has(h)) { + if (u.length < c.length) { + var $ = c[0], g; + a = $.prev; + var C = u[0], w = u[u.length - 1]; + for (g = 0; g < u.length; g += 1) + dt(u[g], $, r); + for (g = 0; g < c.length; g += 1) + f.delete(c[g]); + Re(e, C.prev, w.next), Re(e, a, C), Re(e, w, $), l = $, a = w, p -= 1, u = [], c = []; + } else + f.delete(h), dt(h, l, r), Re(e, h.prev, h.next), Re(e, h, a === null ? e.effect.first : a.next), Re(e, a, h), a = h; + continue; + } + for (u = [], c = []; l !== null && l !== h; ) + (f ??= /* @__PURE__ */ new Set()).add(l), c.push(l), l = ct(l.next); + if (l === null) + continue; + } + h.f & Ee || u.push(h), a = h, l = ct(h.next); + } + if (e.outrogroups !== null) { + for (const E of e.outrogroups) + E.pending.size === 0 && (er(e, Ct(E.done)), e.outrogroups?.delete(E)); + e.outrogroups.size === 0 && (e.outrogroups = null); + } + if (l !== null || f !== void 0) { + var m = []; + if (f !== void 0) + for (h of f) + h.f & V || m.push(h); + for (; l !== null; ) + !(l.f & V) && l !== e.fallback && m.push(l), l = ct(l.next); + var T = m.length; + if (T > 0) { + var M = i === 0 ? r : null; + Ei(e, m, M); + } + } +} +function Ti(e, t, r, n, s, i, o, l) { + var f = o & ts ? o & ns ? Ze(r) : /* @__PURE__ */ ar(r, !1, !1) : null, a = o & rs ? Ze(s) : null; + return { + v: f, + i: a, + e: ee(() => (i(t, f ?? r, a ?? s, l), () => { + e.delete(n); + })) + }; +} +function dt(e, t, r) { + if (e.nodes) + for (var n = e.nodes.start, s = e.nodes.end, i = t && !(t.f & Ee) ? 
( + /** @type {EffectNodes} */ + t.nodes.start + ) : r; n !== null; ) { + var o = ( + /** @type {TemplateNode} */ + /* @__PURE__ */ ve(n) + ); + if (i.before(n), n === s) + return; + n = o; + } +} +function Re(e, t, r) { + t === null ? e.effect.first = r : t.next = r, r === null ? e.effect.last = t : r.prev = t; +} +function Si(e, t, r = !1, n = !1, s = !1, i = !1) { + var o = e, l = ""; + if (r) { + var f = ( + /** @type {Element} */ + e + ); + N && (o = j(/* @__PURE__ */ ne(f))); + } + Te(() => { + var a = ( + /** @type {Effect} */ + y + ); + if (l === (l = t() ?? "")) { + N && st(); + return; + } + if (r && !N) { + a.nodes = null, f.innerHTML = /** @type {string} */ + l, l !== "" && Ae( + /** @type {TemplateNode} */ + /* @__PURE__ */ ne(f), + /** @type {TemplateNode} */ + f.lastChild + ); + return; + } + if (a.nodes !== null && ($n( + a.nodes.start, + /** @type {TemplateNode} */ + a.nodes.end + ), a.nodes = null), l !== "") { + if (N) { + x.data; + for (var u = st(), c = u; u !== null && (u.nodeType !== at || /** @type {Comment} */ + u.data !== ""); ) + c = u, u = /* @__PURE__ */ ve(u); + if (u === null) + throw wt(), Ge; + Ae(x, c), o = j(u); + return; + } + var d = n ? cs : s ? hs : void 0, v = ( + /** @type {HTMLTemplateElement | SVGElement | MathMLElement} */ + Ft(n ? "svg" : s ? "math" : "template", d) + ); + v.innerHTML = /** @type {any} */ + l; + var h = n || s ? v : ( + /** @type {HTMLTemplateElement} */ + v.content + ); + if (Ae( + /** @type {TemplateNode} */ + /* @__PURE__ */ ne(h), + /** @type {TemplateNode} */ + h.lastChild + ), n || s) + for (; /* @__PURE__ */ ne(h); ) + o.before( + /** @type {TemplateNode} */ + /* @__PURE__ */ ne(h) + ); + else + o.before(h); + } + }); +} +const Ai = () => performance.now(), ke = { + // don't access requestAnimationFrame eagerly outside method + // this allows basic testing of user code without JSDOM + // bundler will eval and remove ternary when the user's app is built + tick: ( + /** @param {any} _ */ + (e) => requestAnimationFrame(e) + ), + now: () => Ai(), + tasks: /* @__PURE__ */ new Set() +}; +function Fn() { + const e = ke.now(); + ke.tasks.forEach((t) => { + t.c(e) || (ke.tasks.delete(t), t.f()); + }), ke.tasks.size !== 0 && ke.tick(Fn); +} +function Ni(e) { + let t; + return ke.tasks.size === 0 && ke.tick(Fn), { + promise: new Promise((r) => { + ke.tasks.add(t = { c: e, f: r }); + }), + abort() { + ke.tasks.delete(t); + } + }; +} +function Cr(e, t) { + yt(() => { + e.dispatchEvent(new CustomEvent(t)); + }); +} +function Oi(e) { + if (e === "float") return "cssFloat"; + if (e === "offset") return "cssOffset"; + if (e.startsWith("--")) return e; + const t = e.split("-"); + return t.length === 1 ? t[0] : t[0] + t.slice(1).map( + /** @param {any} word */ + (r) => r[0].toUpperCase() + r.slice(1) + ).join(""); +} +function Rr(e) { + const t = {}, r = e.split(";"); + for (const n of r) { + const [s, i] = n.split(":"); + if (!s || i === void 0) break; + const o = Oi(s.trim()); + t[o] = i.trim(); + } + return t; +} +const Mi = (e) => e; +function tr(e, t, r, n) { + var s = (e & as) !== 0, i = "in", o, l = t.inert, f = t.style.overflow, a, u; + function c() { + return yt(() => o ??= r()(t, n?.() ??
/** @type {P} */ + {}, { + direction: i + })); + } + var d = { + is_global: s, + in() { + t.inert = l, a?.abort(), a = zn( + t, + c(), + u, + 1, + () => { + Cr(t, "introstart"); + }, + () => { + Cr(t, "introend"), a?.abort(), a = o = void 0, t.style.overflow = f; + } + ); + }, + out(_) { + { + _?.(), o = void 0; + return; + } + }, + stop: () => { + a?.abort(); + } + }, v = ( + /** @type {Effect & { nodes: EffectNodes }} */ + y + ); + if ((v.nodes.t ??= []).push(d), Xt) { + var h = s; + if (!h) { + for (var p = ( + /** @type {Effect | null} */ + v.parent + ); p && p.f & Ke; ) + for (; (p = p.parent) && !(p.f & he); ) + ; + h = !p || (p.f & Ce) !== 0; + } + h && hr(() => { + ft(() => d.in()); + }); + } +} +function zn(e, t, r, n, s, i) { + if (bs(t)) { + var o, l = !1; + return Pe(() => { + if (!l) { + var _ = t({ direction: "in" }); + o = zn(e, _, r, n, s, i); + } + }), { + abort: () => { + l = !0, o?.abort(); + }, + deactivate: () => o.deactivate(), + reset: () => o.reset(), + t: () => o.t() + }; + } + if (!t?.duration && !t?.delay) + return s(), i(), { + abort: re, + deactivate: re, + reset: re, + t: () => n + }; + const { delay: f = 0, css: a, tick: u, easing: c = Mi } = t; + var d = []; + if (u && u(0, 1), a) { + var v = Rr(a(0, 1)); + d.push(v, v); + } + var h = () => 1 - n, p = e.animate(d, { duration: f, fill: "forwards" }); + return p.onfinish = () => { + p.cancel(), s(); + var _ = 1 - n, $ = n - _, g = ( + /** @type {number} */ + t.duration * Math.abs($) + ), C = []; + if (g > 0) { + var w = !1; + if (a) + for (var m = Math.ceil(g / 16.666666666666668), T = 0; T <= m; T += 1) { + var M = _ + $ * c(T / m), E = Rr(a(M, 1 - M)); + C.push(E), w ||= E.overflow === "hidden"; + } + w && (e.style.overflow = "hidden"), h = () => { + var L = ( + /** @type {number} */ + /** @type {globalThis.Animation} */ + p.currentTime + ); + return _ + $ * c(L / g); + }, u && Ni(() => { + if (p.playState !== "running") return !1; + var L = h(); + return u(L, 1 - L), !0; + }); + } + p = e.animate(C, { duration: g, fill: "forwards" }), p.onfinish = () => { + h = () => n, u?.(n, 1 - n), i(); + }; + }, { + abort: () => { + p && (p.cancel(), p.effect = null, p.onfinish = re); + }, + deactivate: () => { + i = re; + }, + reset: () => { + }, + t: () => h() + }; +} +function br(e, t) { + hr(() => { + var r = e.getRootNode(), n = ( + /** @type {ShadowRoot} */ + r.host ? ( + /** @type {ShadowRoot} */ + r + ) : ( + /** @type {Document} */ + r.head ?? /** @type {Document} */ + r.ownerDocument.head + ) + ); + if (!n.querySelector("#" + t.hash)) { + const s = Ft("style"); + s.id = t.hash, s.textContent = t.code, n.appendChild(s); + } + }); +} +function jn(e) { + var t, r, n = ""; + if (typeof e == "string" || typeof e == "number") n += e; + else if (typeof e == "object") if (Array.isArray(e)) { + var s = e.length; + for (t = 0; t < s; t++) e[t] && (r = jn(e[t])) && (n && (n += " "), n += r); + } else for (r in e) e[r] && (n && (n += " "), n += r); + return n; +} +function Ci() { + for (var e, t, r = 0, n = "", s = arguments.length; r < s; r++) (e = arguments[r]) && (t = jn(e)) && (n && (n += " "), n += t); + return n; +} +function Ri(e) { + return typeof e == "object" ? Ci(e) : e ?? ""; +} +const Lr = [...` +\r\f \v\uFEFF`]; +function Li(e, t, r) { + var n = e == null ? "" : "" + e; + if (t && (n = n ? n + " " + t : t), r) { + for (var s of Object.keys(r)) + if (r[s]) + n = n ? 
n + " " + s : s; + else if (n.length) + for (var i = s.length, o = 0; (o = n.indexOf(s, o)) >= 0; ) { + var l = o + i; + (o === 0 || Lr.includes(n[o - 1])) && (l === n.length || Lr.includes(n[l])) ? n = (o === 0 ? "" : n.substring(0, o)) + n.substring(l + 1) : o = l; + } + } + return n === "" ? null : n; +} +function Hn(e, t, r, n, s, i) { + var o = e.__className; + if (N || o !== r || o === void 0) { + var l = Li(r, n, i); + (!N || l !== e.getAttribute("class")) && (l == null ? e.removeAttribute("class") : e.className = l), e.__className = r; + } else if (i && s !== i) + for (var f in i) { + var a = !!i[f]; + (s == null || a !== !!s[f]) && e.classList.toggle(f, a); + } + return i; +} +const Ii = Symbol("is custom element"), Di = Symbol("is html"), Pi = ks ? "link" : "LINK"; +function Fi(e, t, r, n) { + var s = zi(e); + N && (s[t] = e.getAttribute(t), e.nodeName === Pi) || s[t] !== (s[t] = r) && (r == null ? e.removeAttribute(t) : typeof r != "string" && ji(e).includes(t) ? e[t] = r : e.setAttribute(t, r)); +} +function zi(e) { + return ( + /** @type {Record} **/ + // @ts-expect-error + e.__attributes ??= { + [Ii]: e.nodeName.includes("-"), + [Di]: e.namespaceURI === zr + } + ); +} +var Ir = /* @__PURE__ */ new Map(); +function ji(e) { + var t = e.getAttribute("is") || e.nodeName, r = Ir.get(t); + if (r) return r; + Ir.set(t, r = []); + for (var n, s = e, i = Element.prototype; i !== s; ) { + n = ps(s); + for (var o in n) + n[o].set && r.push(o); + s = Hr(s); + } + return r; +} +function Dr(e, t) { + return e === t || e?.[vt] === t; +} +function qn(e = {}, t, r, n) { + var s = ( + /** @type {ComponentContext} */ + G.r + ), i = ( + /** @type {Effect} */ + y + ); + return hr(() => { + var o, l; + return dr(() => { + o = l, l = [], ft(() => { + e !== r(...l) && (t(e, ...l), o && Dr(r(...o), e) && t(null, ...o)); + }); + }), () => { + let f = i; + for (; f !== s && f.parent !== null && f.parent.f & qt; ) + f = f.parent; + const a = () => { + l && Dr(r(...l), e) && t(null, ...l); + }, u = f.teardown; + f.teardown = () => { + a(), u?.(); + }; + }; + }), e; +} +function We(e, t, r, n) { + var s = (r & ls) !== 0, i = (r & os) !== 0, o = ( + /** @type {V} */ + n + ), l = !0, f = () => (l && (l = !1, o = i ? ft( + /** @type {() => V} */ + n + ) : ( + /** @type {V} */ + n + )), o); + let a; + if (s) { + var u = vt in e || Ur in e; + a = qe(e, t)?.set ?? (u && t in e ? (g) => e[t] = g : void 0); + } + var c, d = !1; + s ? [c, d] = Bs(() => ( + /** @type {V} */ + e[t] + )) : c = /** @type {V} */ + e[t], c === void 0 && n !== void 0 && (c = f(), a && (Cs(), a(c))); + var v; + if (v = () => { + var g = ( + /** @type {V} */ + e[t] + ); + return g === void 0 ? f() : (l = !0, g); + }, !(r & is)) + return v; + if (a) { + var h = e.$$legacy; + return ( + /** @type {() => V} */ + function(g, C) { + return arguments.length > 0 ? ((!C || h || d) && a(C ? v() : g), g) : v(); + } + ); + } + var p = !1, _ = (r & ss ? Pt : fn)(() => (p = !1, v())); + s && b(_); + var $ = ( + /** @type {Effect} */ + y + ); + return ( + /** @type {() => V} */ + function(g, C) { + if (arguments.length > 0) { + const w = C ? b(_) : s ? je(g) : g; + return te(_, w), p = !0, o !== void 0 && (o = w), g; + } + return Me && p || $.f & Z ? 
_.v : b(_); + } + ); +} +function Hi(e) { + return new qi(e); +} +class qi { + /** @type {any} */ + #e; + /** @type {Record} */ + #n; + /** + * @param {ComponentConstructorOptions & { + * component: any; + * }} options + */ + constructor(t) { + var r = /* @__PURE__ */ new Map(), n = (i, o) => { + var l = /* @__PURE__ */ ar(o, !1, !1); + return r.set(i, l), l; + }; + const s = new Proxy( + { ...t.props || {}, $$events: {} }, + { + get(i, o) { + return b(r.get(o) ?? n(o, Reflect.get(i, o))); + }, + has(i, o) { + return o === Ur ? !0 : (b(r.get(o) ?? n(o, Reflect.get(i, o))), Reflect.has(i, o)); + }, + set(i, o, l) { + return te(r.get(o) ?? n(o, l), l), Reflect.set(i, o, l); + } + } + ); + this.#n = (t.hydrate ? mi : In)(t.component, { + target: t.target, + anchor: t.anchor, + props: s, + context: t.context, + intro: t.intro ?? !1, + recover: t.recover, + transformError: t.transformError + }), (!t?.props?.$$host || t.sync === !1) && Se(), this.#e = s.$$events; + for (const i of Object.keys(this.#n)) + i === "$set" || i === "$destroy" || i === "$on" || gt(this, i, { + get() { + return this.#n[i]; + }, + /** @param {any} value */ + set(o) { + this.#n[i] = o; + }, + enumerable: !0 + }); + this.#n.$set = /** @param {Record} next */ + (i) => { + Object.assign(s, i); + }, this.#n.$destroy = () => { + wi(this.#n); + }; + } + /** @param {Record} props */ + $set(t) { + this.#n.$set(t); + } + /** + * @param {string} event + * @param {(...args: any[]) => any} callback + * @returns {any} + */ + $on(t, r) { + this.#e[t] = this.#e[t] || []; + const n = (...s) => r.call(this, ...s); + return this.#e[t].push(n), () => { + this.#e[t] = this.#e[t].filter( + /** @param {any} fn */ + (s) => s !== n + ); + }; + } + $destroy() { + this.#n.$destroy(); + } +} +let Bn; +typeof HTMLElement == "function" && (Bn = class extends HTMLElement { + /** The Svelte component constructor */ + $$ctor; + /** Slots */ + $$s; + /** @type {any} The Svelte component instance */ + $$c; + /** Whether or not the custom element is connected */ + $$cn = !1; + /** @type {Record} Component props data */ + $$d = {}; + /** `true` if currently in the process of reflecting component props back to attributes */ + $$r = !1; + /** @type {Record} Props definition (name, reflected, type etc) */ + $$p_d = {}; + /** @type {Record} Event listeners */ + $$l = {}; + /** @type {Map} Event listener unsubscribe functions */ + $$l_u = /* @__PURE__ */ new Map(); + /** @type {any} The managed render effect for reflecting attributes */ + $$me; + /** @type {ShadowRoot | null} The ShadowRoot of the custom element */ + $$shadowRoot = null; + /** + * @param {*} $$componentCtor + * @param {*} $$slots + * @param {ShadowRootInit | undefined} shadow_root_init + */ + constructor(e, t, r) { + super(), this.$$ctor = e, this.$$s = t, r && (this.$$shadowRoot = this.attachShadow(r)); + } + /** + * @param {string} type + * @param {EventListenerOrEventListenerObject} listener + * @param {boolean | AddEventListenerOptions} [options] + */ + addEventListener(e, t, r) { + if (this.$$l[e] = this.$$l[e] || [], this.$$l[e].push(t), this.$$c) { + const n = this.$$c.$on(e, t); + this.$$l_u.set(t, n); + } + super.addEventListener(e, t, r); + } + /** + * @param {string} type + * @param {EventListenerOrEventListenerObject} listener + * @param {boolean | AddEventListenerOptions} [options] + */ + removeEventListener(e, t, r) { + if (super.removeEventListener(e, t, r), this.$$c) { + const n = this.$$l_u.get(t); + n && (n(), this.$$l_u.delete(t)); + } + } + async connectedCallback() { + if 
(this.$$cn = !0, !this.$$c) { + let e = function(n) { + return (s) => { + const i = Ft("slot"); + n !== "default" && (i.name = n), U(s, i); + }; + }; + if (await Promise.resolve(), !this.$$cn || this.$$c) + return; + const t = {}, r = Bi(this); + for (const n of this.$$s) + n in r && (n === "default" && !this.$$d.children ? (this.$$d.children = e(n), t.default = !0) : t[n] = e(n)); + for (const n of this.attributes) { + const s = this.$$g_p(n.name); + s in this.$$d || (this.$$d[s] = St(s, n.value, this.$$p_d, "toProp")); + } + for (const n in this.$$p_d) + !(n in this.$$d) && this[n] !== void 0 && (this.$$d[n] = this[n], delete this[n]); + this.$$c = Hi({ + component: this.$$ctor, + target: this.$$shadowRoot || this, + props: { + ...this.$$d, + $$slots: t, + $$host: this + } + }), this.$$me = si(() => { + dr(() => { + this.$$r = !0; + for (const n of At(this.$$c)) { + if (!this.$$p_d[n]?.reflect) continue; + this.$$d[n] = this.$$c[n]; + const s = St( + n, + this.$$d[n], + this.$$p_d, + "toAttribute" + ); + s == null ? this.removeAttribute(this.$$p_d[n].attribute || n) : this.setAttribute(this.$$p_d[n].attribute || n, s); + } + this.$$r = !1; + }); + }); + for (const n in this.$$l) + for (const s of this.$$l[n]) { + const i = this.$$c.$on(n, s); + this.$$l_u.set(s, i); + } + this.$$l = {}; + } + } + // We don't need this when working within Svelte code, but for compatibility of people using this outside of Svelte + // and setting attributes through setAttribute etc, this is helpful + /** + * @param {string} attr + * @param {string} _oldValue + * @param {string} newValue + */ + attributeChangedCallback(e, t, r) { + this.$$r || (e = this.$$g_p(e), this.$$d[e] = St(e, r, this.$$p_d, "toProp"), this.$$c?.$set({ [e]: this.$$d[e] })); + } + disconnectedCallback() { + this.$$cn = !1, Promise.resolve().then(() => { + !this.$$cn && this.$$c && (this.$$c.$destroy(), this.$$me(), this.$$c = void 0); + }); + } + /** + * @param {string} attribute_name + */ + $$g_p(e) { + return At(this.$$p_d).find( + (t) => this.$$p_d[t].attribute === e || !this.$$p_d[t].attribute && t.toLowerCase() === e + ) || e; + } +}); +function St(e, t, r, n) { + const s = r[e]?.type; + if (t = s === "Boolean" && typeof t != "boolean" ? t != null : t, !n || !r[e]) + return t; + if (n === "toAttribute") + switch (s) { + case "Object": + case "Array": + return t == null ? null : JSON.stringify(t); + case "Boolean": + return t ? "" : null; + case "Number": + return t ?? null; + default: + return t; + } + else + switch (s) { + case "Object": + case "Array": + return t && JSON.parse(t); + case "Boolean": + return t; + case "Number": + return t != null ? +t : t; + default: + return t; + } +} +function Bi(e) { + const t = {}; + return e.childNodes.forEach((r) => { + t[ + /** @type {Element} node */ + r.slot || "default" + ] = !0; + }), t; +} +function mr(e, t, r, n, s, i) { + let o = class extends Bn { + constructor() { + super(e, r, s), this.$$p_d = t; + } + static get observedAttributes() { + return At(t).map( + (l) => (t[l].attribute || l).toLowerCase() + ); + } + }; + return At(t).forEach((l) => { + gt(o.prototype, l, { + get() { + return this.$$c && l in this.$$c ? this.$$c[l] : this.$$d[l]; + }, + set(f) { + f = St(l, f, t), this.$$d[l] = f; + var a = this.$$c; + if (a) { + var u = qe(a, l)?.get; + u ? 
a[l] = f : a.$set({ [l]: f }); + } + } + }); + }), n.forEach((l) => { + gt(o.prototype, l, { + get() { + return this.$$c?.[l]; + } + }); + }), e.element = /** @type {any} */ + o, o; +} +const tt = rn(null), Un = rn({}), Ui = (e) => e; +function Vn(e) { + const t = e - 1; + return t * t * t + 1; +} +function Pr(e) { + const t = typeof e == "string" && e.match(/^\s*(-?[\d.]+)([^\s]*)\s*$/); + return t ? [parseFloat(t[1]), t[2] || "px"] : [ + /** @type {number} */ + e, + "px" + ]; +} +function Vi(e, { delay: t = 0, duration: r = 400, easing: n = Ui } = {}) { + const s = +getComputedStyle(e).opacity; + return { + delay: t, + duration: r, + easing: n, + css: (i) => `opacity: ${i * s}` + }; +} +function Yi(e, { delay: t = 0, duration: r = 400, easing: n = Vn, x: s = 0, y: i = 0, opacity: o = 0 } = {}) { + const l = getComputedStyle(e), f = +l.opacity, a = l.transform === "none" ? "" : l.transform, u = f * (1 - o), [c, d] = Pr(s), [v, h] = Pr(i); + return { + delay: t, + duration: r, + easing: n, + css: (p, _) => ` + transform: ${a} translate(${(1 - p) * c}${d}, ${(1 - p) * v}${h}); + opacity: ${f - u * _}` + }; +} +function Wi(e, { delay: t = 0, duration: r = 400, easing: n = Vn, start: s = 0, opacity: i = 0 } = {}) { + const o = getComputedStyle(e), l = +o.opacity, f = o.transform === "none" ? "" : o.transform, a = 1 - s, u = l * (1 - i); + return { + delay: t, + duration: r, + easing: n, + css: (c, d) => ` + transform: ${f} scale(${1 - a * d}); + opacity: ${l - u * d} + ` + }; +} +function Yn(e) { + const t = e - 1; + return t * t * t + 1; +} +var Gi = /* @__PURE__ */ ae(' '), Ki = /* @__PURE__ */ ae(' '), Ji = /* @__PURE__ */ ae(' '), Zi = /* @__PURE__ */ ae('
          1. '), Xi = /* @__PURE__ */ ae('
            Sources
              ', 1); +const Qi = { + hash: "svelte-3iukgs", + code: `:host {display:block;border-top:1px solid var(--line, #e5e7eb);background:var(--bg-soft, #f5f7fb);padding:12px 16px 14px;}:host(:not(:has(ol))) {display:none;}.src-h.svelte-3iukgs {font-size:10px;font-weight:700;text-transform:uppercase;letter-spacing:0.10em;color:var(--text-muted, #6b7280);margin:0 0 8px;}ol.svelte-3iukgs {margin:0;padding:0;list-style:none;display:grid;gap:6px;font-size:11.5px;line-height:1.45;}li.svelte-3iukgs {display:grid;grid-template-columns:22px 1fr;gap:8px;align-items:baseline;padding:4px 6px;border-radius:3px;cursor:pointer;transition:background 0.15s;}li.svelte-3iukgs:hover, li.hl.svelte-3iukgs {background:rgba(22, 66, 223, 0.10);}li.hl.svelte-3iukgs { + /* Brief pulse each time a chip selects this row. */ + animation: svelte-3iukgs-pulse 360ms cubic-bezier(.2,.7,.3,1);} + @keyframes svelte-3iukgs-pulse { + 0% { box-shadow: 0 0 0 0 rgba(22, 66, 223, 0.35); } + 60% { box-shadow: 0 0 0 6px rgba(22, 66, 223, 0.00); } + 100% { box-shadow: 0 0 0 0 rgba(22, 66, 223, 0.00); } + }.src-num.svelte-3iukgs {font-family:var(--mono, monospace);font-size:10.5px;font-weight:700;color:var(--nyc-blue, #1642DF);text-align:right;}.src-link.svelte-3iukgs {color:var(--text, #111);text-decoration:none;border-bottom:1px dotted var(--text-muted, #6b7280);}.src-link.svelte-3iukgs:hover {color:var(--nyc-blue, #1642DF);border-bottom-color:var(--nyc-blue, #1642DF);}.src-ext.svelte-3iukgs {font-size:9.5px;color:var(--text-faint, #9ca3af);margin-left:2px;vertical-align:super;}.src-vintage.svelte-3iukgs {display:block;color:var(--text-muted, #6b7280);font-size:9.5px;margin-top:2px;}.src-id.svelte-3iukgs {display:inline-block;font-family:var(--mono, monospace);font-size:9.5px;color:var(--text-faint, #9ca3af);margin-left:6px;}` +}; +function el(e, t) { + It(t, !0), br(e, Qi); + const r = () => Vt(Un, "$citeIndex", s), n = () => Vt(tt, "$highlightedDocId", s), [s, i] = nn(); + let o = We(t, "labels", 23, () => ({})), l = We(t, "urls", 23, () => ({})), f = We(t, "vintages", 23, () => ({})), a = /* @__PURE__ */ Le(() => Object.entries(r() || {}).sort((_, $) => _[1] - $[1])), u = /* @__PURE__ */ Le(n); + var c = { + get labels() { + return o(); + }, + set labels(_ = {}) { + o(_), Se(); + }, + get urls() { + return l(); + }, + set urls(_ = {}) { + l(_), Se(); + }, + get vintages() { + return f(); + }, + set vintages(_ = {}) { + f(_), Se(); + } + }, d = Ln(), v = Kt(d); + { + var h = (_) => { + var $ = Xi(), g = Kt($), C = $e(g, 2); + Pn(C, 21, () => b(a), ([w, m]) => w, (w, m) => { + var T = /* @__PURE__ */ Le(() => ws(b(m), 2)); + let M = () => b(T)[0], E = () => b(T)[1]; + const L = /* @__PURE__ */ Le(() => l()[M()]), q = /* @__PURE__ */ Le(() => o()[M()] || M()), O = /* @__PURE__ */ Le(() => f()[M()]); + var k = Zi(); + let I; + var X = B(k), Wn = B(X); + F(X); + var wr = $e(X, 2), yr = B(wr); + { + var Gn = (pe) => { + var K = Gi(), ut = B(K); + Vr(), F(K), Te(() => { + Fi(K, "href", b(L)), fe(ut, `${b(q) ?? ""} `); + }), Nr("click", K, (Qn) => Qn.stopPropagation()), U(pe, K); + }, Kn = (pe) => { + var K = Ki(), ut = B(K, !0); + F(K), Te(() => fe(ut, b(q))), U(pe, K); + }; + Ye(yr, (pe) => { + b(L) ? 
pe(Gn) : pe(Kn, -1); + }); + } + var zt = $e(yr, 2), Jn = B(zt, !0); + F(zt); + var Zn = $e(zt, 2); + { + var Xn = (pe) => { + var K = Ji(), ut = B(K, !0); + F(K), Te(() => fe(ut, b(O))), U(pe, K); + }; + Ye(Zn, (pe) => { + b(O) && pe(Xn); + }); + } + F(wr), F(k), Te(() => { + I = Hn(k, 1, "svelte-3iukgs", null, I, { hl: M() === b(u) }), fe(Wn, `[${E() ?? ""}]`), fe(Jn, M()); + }), hi("mouseenter", k, () => tt.set(M())), Nr("click", k, () => tt.set(b(u) === M() ? null : M())), tr(1, k, () => Wi, () => ({ start: 0.96, duration: 220, easing: Yn })), U(w, k); + }), F(C), tr(1, g, () => Vi, () => ({ duration: 200 })), U(_, $); + }; + Ye(v, (_) => { + b(a).length && _(h); + }); + } + U(e, d); + var p = Dt(c); + return i(), p; +} +di(["click"]); +customElements.define("r-sources-footer", mr( + el, + { + labels: { type: "Object" }, + urls: { type: "Object" }, + vintages: { type: "Object" } + }, + [], + [], + { mode: "open" } +)); +var tl = /* @__PURE__ */ ae('
              Waiting for content…
              '), rl = /* @__PURE__ */ ae('
              '); +const nl = { + hash: "svelte-5ir0b", + code: `:host {display:block;} + /* The host-level styles for typography, .cite, etc. live in the parent + stylesheet and target #paragraph descendants — they pierce shadow DOM + for inline-styled markup we don't ship here. The .rsum-* classes are + wired in the global stylesheet. We intentionally don't restate them. */:host(.streaming)::after, + :host([streaming])::after {content:"▋";display:inline-block;color:var(--nyc-blue, #1642DF);margin-left:2px; + animation: svelte-5ir0b-caret 0.9s steps(1) infinite;} + @keyframes svelte-5ir0b-caret { 50% { opacity: 0; } }` +}; +function sl(e, t) { + It(t, !0), br(e, nl); + const r = () => Vt(tt, "$highlightedDocId", n), [n, s] = nn(); + let i = We(t, "text", 7, ""), o = We(t, "streaming", 7, !1), l = We(t, "sourceLabels", 23, () => ({})); + const f = (w) => String(w ?? "").replace(/&/g, "&").replace(//g, ">"); + function a(w) { + const m = w.split(` +`), T = []; + let M = [], E = []; + const L = () => { + if (!M.length) return; + const k = f(M.join(" ").trim()).replace(/\*\*([^*]+)\*\*/g, "$1"); + k && T.push(`

              ${k}

              `), M = []; + }, q = () => { + if (!E.length) return; + const k = E.map((I) => `
            1. ${f(I.trim()).replace(/\*\*([^*]+)\*\*/g, "$1")}
            2. `).join(""); + T.push(`
                ${k}
              `), E = []; + }, O = []; + for (const k of m) + if (k.trim().startsWith("- ") && k.includes(" - ", 2)) { + const I = k.split(/(?:^|(?<=\.\s))\s*-\s+/g).filter((X) => X.trim()); + for (const X of I) O.push("- " + X.trim()); + } else + O.push(k); + for (const k of O) { + const I = k.match(/^\s*\*\*([A-Z][A-Za-z\s/]+)\.\*\*\s*$/); + I ? (L(), q(), T.push(`

              ${f(I[1])}

              `)) : /^\s*[-*]\s+/.test(k) ? (L(), E.push(k.replace(/^\s*[-*]\s+/, ""))) : (q(), M.push(k)); + } + return L(), q(), T.join(""); + } + function u(w, m) { + return w.replace(/\[([a-z0-9_]+)\]/gi, (T, M) => { + const E = M.toLowerCase(); + m[E] == null && (m[E] = Object.keys(m).length + 1); + const L = m[E], q = l()[E] || E; + return `${L}`; + }); + } + let c = /* @__PURE__ */ Le(() => { + if (!i()) return ""; + const w = {}, m = a(i()), T = u(m, w); + return queueMicrotask(() => Un.set({ ...w })), T; + }), d, v = /* @__PURE__ */ Le(r); + mn(() => { + b(c), b(v), d && ui().then(() => { + d.querySelectorAll(".cite").forEach((m) => { + const T = m.dataset.srcId; + T && (m.classList.toggle("hl", T === b(v)), !m.dataset.bound && (m.dataset.bound = "1", m.addEventListener("mouseenter", () => tt.set(T)), m.addEventListener("click", (M) => { + M.stopPropagation(), tt.update((E) => E === T ? null : T); + }))); + }); + }); + }); + var h = { + get text() { + return i(); + }, + set text(w = "") { + i(w), Se(); + }, + get streaming() { + return o(); + }, + set streaming(w = !1) { + o(w), Se(); + }, + get sourceLabels() { + return l(); + }, + set sourceLabels(w = {}) { + l(w), Se(); + } + }, p = Ln(), _ = Kt(p); + { + var $ = (w) => { + var m = tl(); + U(w, m); + }, g = (w) => { + var m = rl(); + Si(m, () => b(c), !0), F(m), qn(m, (T) => d = T, () => d), U(w, m); + }; + Ye(_, (w) => { + i() ? w(g, -1) : w($); + }); + } + U(e, p); + var C = Dt(h); + return s(), C; +} +customElements.define("r-briefing", mr( + sl, + { + text: { type: "String" }, + streaming: { reflect: !0, type: "Boolean" }, + sourceLabels: { type: "Object" } + }, + [], + [], + { mode: "open" } +)); +var il = /* @__PURE__ */ ae(' '), ll = /* @__PURE__ */ ae('
              '), ol = /* @__PURE__ */ ae('
              '), al = /* @__PURE__ */ ae('
            3. '), fl = /* @__PURE__ */ ae('
                '); +const ul = { + hash: "svelte-c4g9ik", + code: ":host {display:block;}ol.svelte-c4g9ik {list-style:none;margin:0;padding:4px 0;font-size:12.5px;}li.svelte-c4g9ik {display:grid;grid-template-columns:18px 1fr auto;gap:10px;padding:7px 14px;border-bottom:1px solid var(--line, #e5e7eb);align-items:baseline;}li.svelte-c4g9ik:last-child {border-bottom:0;}.icon.svelte-c4g9ik {font-weight:700;font-size:14px;line-height:1;}.running.svelte-c4g9ik .icon:where(.svelte-c4g9ik) {color:var(--nyc-blue, #1642DF);}.ok.svelte-c4g9ik .icon:where(.svelte-c4g9ik) {color:var(--good, #1a8754);}.err.svelte-c4g9ik .icon:where(.svelte-c4g9ik) {color:var(--nyc-scarlet, #b80000);}.label.svelte-c4g9ik {color:var(--text, #111);font-weight:500;}.meta.svelte-c4g9ik {color:var(--text-muted, #6b7280);font-size:11px;}.time.svelte-c4g9ik {font-family:var(--mono, monospace);color:var(--text-faint, #9ca3af);font-size:11.5px;}.running.svelte-c4g9ik {background:rgba(22, 66, 223, 0.04);}.result.svelte-c4g9ik {grid-column:2 / -1;color:var(--text-muted, #6b7280);font-size:11px;font-family:var(--mono, monospace);margin-top:3px;word-break:break-word;line-height:1.4;}" +}; +function cl(e, t) { + It(t, !0), br(e, ul); + let r = We(t, "stepLabels", 23, () => ({})), n = /* @__PURE__ */ me(je([])); + $i(() => { + const c = s?.getRootNode()?.host; + c && (c.pushStep = (d) => { + te(n, [...b(n), d], !0); + }, c.clear = () => { + te(n, [], !0); + }); + }); + let s; + function i(c) { + return c.ok === !0 ? "ok" : c.ok === !1 ? "err" : "running"; + } + function o(c) { + return c.ok === !0 ? "✓" : c.ok === !1 ? "✗" : "○"; + } + function l(c) { + return r()[c.step] && r()[c.step][0] || c.step; + } + function f(c) { + return r()[c.step] && r()[c.step][1] || ""; + } + var a = { + get stepLabels() { + return r(); + }, + set stepLabels(c = {}) { + r(c), Se(); + } + }, u = fl(); + return Pn(u, 21, () => b(n), ki, (c, d) => { + var v = al(), h = B(v), p = B(h, !0); + F(h); + var _ = $e(h, 2), $ = B(_), g = B($, !0); + F($); + var C = $e($, 2), w = B(C, !0); + F(C), F(_); + var m = $e(_, 2); + { + var T = (O) => { + var k = il(), I = B(k); + F(k), Te(() => fe(I, `${b(d).elapsed_s ?? 
""}s`)), U(O, k); + }; + Ye(m, (O) => { + b(d).elapsed_s != null && O(T); + }); + } + var M = $e(m, 2); + { + var E = (O) => { + var k = ll(), I = B(k, !0); + F(k), Te((X) => fe(I, X), [() => JSON.stringify(b(d).result)]), U(O, k); + }; + Ye(M, (O) => { + b(d).result && O(E); + }); + } + var L = $e(M, 2); + { + var q = (O) => { + var k = ol(), I = B(k, !0); + F(k), Te(() => fe(I, b(d).err)), U(O, k); + }; + Ye(L, (O) => { + b(d).err && O(q); + }); + } + F(v), Te( + (O, k, I, X) => { + Hn(v, 1, O, "svelte-c4g9ik"), fe(p, k), fe(g, I), fe(w, X); + }, + [ + () => Ri(i(b(d))), + () => o(b(d)), + () => l(b(d)), + () => f(b(d)) + ] + ), tr(1, v, () => Yi, () => ({ y: -8, duration: 220, easing: Yn })), U(c, v); + }), F(u), qn(u, (c) => s = c, () => s), U(e, u), Dt(a); +} +customElements.define("r-trace", mr(cl, { stepLabels: { type: "Object" } }, [], [], { mode: "open" })); +export { + Un as citeIndex, + tt as highlightedDocId +}; +//# sourceMappingURL=riprap.js.map diff --git a/web/static/dist/riprap.js.map b/web/static/dist/riprap.js.map new file mode 100644 index 0000000000000000000000000000000000000000..2bbbb343a095476ba5f1614e9495d3ad40e8d749 --- /dev/null +++ b/web/static/dist/riprap.js.map @@ -0,0 +1 @@ +{"version":3,"file":"riprap.js","sources":["../../svelte/node_modules/svelte/src/version.js","../../svelte/node_modules/svelte/src/internal/disclose-version.js","../../svelte/node_modules/svelte/src/constants.js","../../svelte/node_modules/esm-env/false.js","../../svelte/node_modules/svelte/src/internal/shared/utils.js","../../svelte/node_modules/svelte/src/internal/client/constants.js","../../svelte/node_modules/svelte/src/internal/shared/errors.js","../../svelte/node_modules/svelte/src/internal/client/errors.js","../../svelte/node_modules/svelte/src/internal/client/warnings.js","../../svelte/node_modules/svelte/src/internal/client/dom/hydration.js","../../svelte/node_modules/svelte/src/internal/client/reactivity/equality.js","../../svelte/node_modules/svelte/src/internal/flags/index.js","../../svelte/node_modules/svelte/src/internal/client/context.js","../../svelte/node_modules/svelte/src/internal/client/dom/task.js","../../svelte/node_modules/svelte/src/internal/client/error-handling.js","../../svelte/node_modules/svelte/src/internal/client/reactivity/status.js","../../svelte/node_modules/svelte/src/internal/client/reactivity/utils.js","../../svelte/node_modules/svelte/src/store/utils.js","../../svelte/node_modules/svelte/src/store/shared/index.js","../../svelte/node_modules/svelte/src/internal/client/reactivity/store.js","../../svelte/node_modules/svelte/src/internal/client/reactivity/batch.js","../../svelte/node_modules/svelte/src/reactivity/create-subscriber.js","../../svelte/node_modules/svelte/src/internal/client/dom/blocks/boundary.js","../../svelte/node_modules/svelte/src/internal/client/reactivity/async.js","../../svelte/node_modules/svelte/src/internal/client/reactivity/deriveds.js","../../svelte/node_modules/svelte/src/internal/client/reactivity/sources.js","../../svelte/node_modules/svelte/src/internal/client/proxy.js","../../svelte/node_modules/svelte/src/internal/client/dom/operations.js","../../svelte/node_modules/svelte/src/internal/client/dom/elements/bindings/shared.js","../../svelte/node_modules/svelte/src/internal/client/reactivity/effects.js","../../svelte/node_modules/svelte/src/internal/client/runtime.js","../../svelte/node_modules/svelte/src/internal/client/dom/elements/events.js","../../svelte/node_modules/svelte/src/internal/client/dom/reconciler.js","../../svelt
e/node_modules/svelte/src/internal/client/dom/template.js","../../svelte/node_modules/svelte/src/utils.js","../../svelte/node_modules/svelte/src/internal/client/render.js","../../svelte/node_modules/svelte/src/internal/client/dom/blocks/branches.js","../../svelte/node_modules/svelte/src/index-client.js","../../svelte/node_modules/svelte/src/internal/client/dom/blocks/if.js","../../svelte/node_modules/svelte/src/internal/client/dom/blocks/each.js","../../svelte/node_modules/svelte/src/internal/client/dom/blocks/html.js","../../svelte/node_modules/svelte/src/internal/client/timing.js","../../svelte/node_modules/svelte/src/internal/client/loop.js","../../svelte/node_modules/svelte/src/internal/client/dom/elements/transitions.js","../../svelte/node_modules/svelte/src/internal/client/dom/css.js","../../svelte/node_modules/clsx/dist/clsx.mjs","../../svelte/node_modules/svelte/src/internal/shared/attributes.js","../../svelte/node_modules/svelte/src/internal/client/dom/elements/class.js","../../svelte/node_modules/svelte/src/internal/client/dom/elements/attributes.js","../../svelte/node_modules/svelte/src/internal/client/dom/elements/bindings/this.js","../../svelte/node_modules/svelte/src/internal/client/reactivity/props.js","../../svelte/node_modules/svelte/src/legacy/legacy-client.js","../../svelte/node_modules/svelte/src/internal/client/dom/elements/custom-element.js","../../svelte/src/lib/stores.js","../../svelte/node_modules/svelte/src/transition/index.js","../../svelte/node_modules/svelte/src/easing/index.js","../../svelte/src/lib/SourcesFooter.svelte","../../svelte/src/lib/Briefing.svelte","../../svelte/src/lib/Trace.svelte"],"sourcesContent":["// generated during release, do not modify\n\n/**\n * The current version, as set in package.json.\n * @type {string}\n */\nexport const VERSION = '5.55.5';\nexport const PUBLIC_VERSION = '5';\n","import { PUBLIC_VERSION } from '../version.js';\n\nif (typeof window !== 'undefined') {\n\t// @ts-expect-error\n\t((window.__svelte ??= {}).v ??= new Set()).add(PUBLIC_VERSION);\n}\n","export const EACH_ITEM_REACTIVE = 1;\nexport const EACH_INDEX_REACTIVE = 1 << 1;\n/** See EachBlock interface metadata.is_controlled for an explanation what this is */\nexport const EACH_IS_CONTROLLED = 1 << 2;\nexport const EACH_IS_ANIMATED = 1 << 3;\nexport const EACH_ITEM_IMMUTABLE = 1 << 4;\n\nexport const PROPS_IS_IMMUTABLE = 1;\nexport const PROPS_IS_RUNES = 1 << 1;\nexport const PROPS_IS_UPDATED = 1 << 2;\nexport const PROPS_IS_BINDABLE = 1 << 3;\nexport const PROPS_IS_LAZY_INITIAL = 1 << 4;\n\nexport const TRANSITION_IN = 1;\nexport const TRANSITION_OUT = 1 << 1;\nexport const TRANSITION_GLOBAL = 1 << 2;\n\nexport const TEMPLATE_FRAGMENT = 1;\nexport const TEMPLATE_USE_IMPORT_NODE = 1 << 1;\nexport const TEMPLATE_USE_SVG = 1 << 2;\nexport const TEMPLATE_USE_MATHML = 1 << 3;\n\nexport const HYDRATION_START = '[';\n/** used to indicate that an `{:else}...` block was rendered */\nexport const HYDRATION_START_ELSE = '[!';\n/** used to indicate that a boundary's `failed` snippet was rendered on the server */\nexport const HYDRATION_START_FAILED = '[?';\nexport const HYDRATION_END = ']';\nexport const HYDRATION_ERROR = {};\n\nexport const ELEMENT_IS_NAMESPACED = 1;\nexport const ELEMENT_PRESERVE_ATTRIBUTE_CASE = 1 << 1;\nexport const ELEMENT_IS_INPUT = 1 << 2;\n\nexport const UNINITIALIZED = Symbol();\n\n// Dev-time component properties\nexport const FILENAME = Symbol('filename');\nexport const HMR = Symbol('hmr');\n\nexport const NAMESPACE_HTML = 
'http://www.w3.org/1999/xhtml';\nexport const NAMESPACE_SVG = 'http://www.w3.org/2000/svg';\nexport const NAMESPACE_MATHML = 'http://www.w3.org/1998/Math/MathML';\n\n// we use a list of ignorable runtime warnings because not every runtime warning\n// can be ignored and we want to keep the validation for svelte-ignore in place\nexport const IGNORABLE_RUNTIME_WARNINGS = /** @type {const} */ ([\n\t'await_waterfall',\n\t'await_reactivity_loss',\n\t'state_snapshot_uncloneable',\n\t'binding_property_non_reactive',\n\t'hydration_attribute_changed',\n\t'hydration_html_changed',\n\t'ownership_invalid_binding',\n\t'ownership_invalid_mutation'\n]);\n\n/**\n * Whitespace inside one of these elements will not result in\n * a whitespace node being created in any circumstances. (This\n * list is almost certainly very incomplete)\n * TODO this is currently unused\n */\nexport const ELEMENTS_WITHOUT_TEXT = ['audio', 'datalist', 'dl', 'optgroup', 'select', 'video'];\n\nexport const ATTACHMENT_KEY = '@attach';\n","export default false;\n","// Store the references to globals in case someone tries to monkey patch these, causing the below\n// to de-opt (this occurs often when using popular extensions).\nexport var is_array = Array.isArray;\nexport var index_of = Array.prototype.indexOf;\nexport var includes = Array.prototype.includes;\nexport var array_from = Array.from;\nexport var object_keys = Object.keys;\nexport var define_property = Object.defineProperty;\nexport var get_descriptor = Object.getOwnPropertyDescriptor;\nexport var get_descriptors = Object.getOwnPropertyDescriptors;\nexport var object_prototype = Object.prototype;\nexport var array_prototype = Array.prototype;\nexport var get_prototype_of = Object.getPrototypeOf;\nexport var is_extensible = Object.isExtensible;\nexport var has_own_property = Object.prototype.hasOwnProperty;\n\n/**\n * @param {any} thing\n * @returns {thing is Function}\n */\nexport function is_function(thing) {\n\treturn typeof thing === 'function';\n}\n\nexport const noop = () => {};\n\n// Adapted from https://github.com/then/is-promise/blob/master/index.js\n// Distributed under MIT License https://github.com/then/is-promise/blob/master/LICENSE\n\n/**\n * @template [T=any]\n * @param {any} value\n * @returns {value is PromiseLike}\n */\nexport function is_promise(value) {\n\treturn typeof value?.then === 'function';\n}\n\n/** @param {Function} fn */\nexport function run(fn) {\n\treturn fn();\n}\n\n/** @param {Array<() => void>} arr */\nexport function run_all(arr) {\n\tfor (var i = 0; i < arr.length; i++) {\n\t\tarr[i]();\n\t}\n}\n\n/**\n * TODO replace with Promise.withResolvers once supported widely enough\n * @template [T=void]\n */\nexport function deferred() {\n\t/** @type {(value: T) => void} */\n\tvar resolve;\n\n\t/** @type {(reason: any) => void} */\n\tvar reject;\n\n\t/** @type {Promise} */\n\tvar promise = new Promise((res, rej) => {\n\t\tresolve = res;\n\t\treject = rej;\n\t});\n\n\t// @ts-expect-error\n\treturn { promise, resolve, reject };\n}\n\n/**\n * @template V\n * @param {V} value\n * @param {V | (() => V)} fallback\n * @param {boolean} [lazy]\n * @returns {V}\n */\nexport function fallback(value, fallback, lazy = false) {\n\treturn value === undefined\n\t\t? lazy\n\t\t\t? 
/** @type {() => V} */ (fallback)()\n\t\t\t: /** @type {V} */ (fallback)\n\t\t: value;\n}\n\n/**\n * When encountering a situation like `let [a, b, c] = $derived(blah())`,\n * we need to stash an intermediate value that `a`, `b`, and `c` derive\n * from, in case it's an iterable\n * @template T\n * @param {ArrayLike | Iterable} value\n * @param {number} [n]\n * @returns {Array}\n */\nexport function to_array(value, n) {\n\t// return arrays unchanged\n\tif (Array.isArray(value)) {\n\t\treturn value;\n\t}\n\n\t// if value is not iterable, or `n` is unspecified (indicates a rest\n\t// element, which means we're not concerned about unbounded iterables)\n\t// convert to an array with `Array.from`\n\tif (n === undefined || !(Symbol.iterator in value)) {\n\t\treturn Array.from(value);\n\t}\n\n\t// otherwise, populate an array with `n` values\n\n\t/** @type {T[]} */\n\tconst array = [];\n\n\tfor (const element of value) {\n\t\tarray.push(element);\n\t\tif (array.length === n) break;\n\t}\n\n\treturn array;\n}\n\n/**\n * @param {Record} obj\n * @param {Array} keys\n * @returns {Record}\n */\nexport function exclude_from_object(obj, keys) {\n\t/** @type {Record} */\n\tvar result = {};\n\n\tfor (var key in obj) {\n\t\tif (!keys.includes(key)) {\n\t\t\tresult[key] = obj[key];\n\t\t}\n\t}\n\n\tfor (var symbol of Object.getOwnPropertySymbols(obj)) {\n\t\tif (Object.propertyIsEnumerable.call(obj, symbol) && !keys.includes(symbol)) {\n\t\t\tresult[symbol] = obj[symbol];\n\t\t}\n\t}\n\n\treturn result;\n}\n","// General flags\nexport const DERIVED = 1 << 1;\nexport const EFFECT = 1 << 2;\nexport const RENDER_EFFECT = 1 << 3;\n/**\n * An effect that does not destroy its child effects when it reruns.\n * Runs as part of render effects, i.e. not eagerly as part of tree traversal or effect flushing.\n */\nexport const MANAGED_EFFECT = 1 << 24;\n/**\n * An effect that does not destroy its child effects when it reruns (like MANAGED_EFFECT).\n * Runs eagerly as part of tree traversal or effect flushing.\n */\nexport const BLOCK_EFFECT = 1 << 4;\nexport const BRANCH_EFFECT = 1 << 5;\nexport const ROOT_EFFECT = 1 << 6;\nexport const BOUNDARY_EFFECT = 1 << 7;\n/**\n * Indicates that a reaction is connected to an effect root — either it is an effect,\n * or it is a derived that is depended on by at least one effect. If a derived has\n * no dependents, we can disconnect it from the graph, allowing it to either be\n * GC'd or reconnected later if an effect comes to depend on it again\n */\nexport const CONNECTED = 1 << 9;\nexport const CLEAN = 1 << 10;\nexport const DIRTY = 1 << 11;\nexport const MAYBE_DIRTY = 1 << 12;\nexport const INERT = 1 << 13;\nexport const DESTROYED = 1 << 14;\n/** Set once a reaction has run for the first time */\nexport const REACTION_RAN = 1 << 15;\n/** Effect is in the process of getting destroyed. 
Can be observed in child teardown functions */\nexport const DESTROYING = 1 << 25;\n\n// Flags exclusive to effects\n/**\n * 'Transparent' effects do not create a transition boundary.\n * This is on a block effect 99% of the time but may also be on a branch effect if its parent block effect was pruned\n */\nexport const EFFECT_TRANSPARENT = 1 << 16;\nexport const EAGER_EFFECT = 1 << 17;\nexport const HEAD_EFFECT = 1 << 18;\nexport const EFFECT_PRESERVED = 1 << 19;\nexport const USER_EFFECT = 1 << 20;\nexport const EFFECT_OFFSCREEN = 1 << 25;\n\n// Flags exclusive to deriveds\n/**\n * Tells that we marked this derived and its reactions as visited during the \"mark as (maybe) dirty\"-phase.\n * Will be lifted during execution of the derived and during checking its dirty state (both are necessary\n * because a derived might be checked but not executed). This is a pure performance optimization flag and\n * should not be used for any other purpose!\n */\nexport const WAS_MARKED = 1 << 16;\n\n// Flags used for async\nexport const REACTION_IS_UPDATING = 1 << 21;\nexport const ASYNC = 1 << 22;\n\nexport const ERROR_VALUE = 1 << 23;\n\nexport const STATE_SYMBOL = Symbol('$state');\nexport const LEGACY_PROPS = Symbol('legacy props');\nexport const LOADING_ATTR_SYMBOL = Symbol('');\nexport const PROXY_PATH_SYMBOL = Symbol('proxy path');\n/** An anchor might change, via this symbol on the original anchor we can tell HMR about the updated anchor */\nexport const HMR_ANCHOR = Symbol('hmr anchor');\n\n/** allow users to ignore aborted signal errors if `reason.name === 'StaleReactionError` */\nexport const STALE_REACTION = new (class StaleReactionError extends Error {\n\tname = 'StaleReactionError';\n\tmessage = 'The reaction that called `getAbortSignal()` was re-run or destroyed';\n})();\n\nexport const IS_XHTML =\n\t// We gotta write it like this because after downleveling the pure comment may end up in the wrong location\n\t!!globalThis.document?.contentType &&\n\t/* @__PURE__ */ globalThis.document.contentType.includes('xml');\nexport const ELEMENT_NODE = 1;\nexport const TEXT_NODE = 3;\nexport const COMMENT_NODE = 8;\nexport const DOCUMENT_FRAGMENT_NODE = 11;\n","/* This file is generated by scripts/process-messages/index.js. Do not edit! */\n\nimport { DEV } from 'esm-env';\n\n/**\n * Cannot use `%name%(...)` unless the `experimental.async` compiler option is `true`\n * @param {string} name\n * @returns {never}\n */\nexport function experimental_async_required(name) {\n\tif (DEV) {\n\t\tconst error = new Error(`experimental_async_required\\nCannot use \\`${name}(...)\\` unless the \\`experimental.async\\` compiler option is \\`true\\`\\nhttps://svelte.dev/e/experimental_async_required`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/experimental_async_required`);\n\t}\n}\n\n/**\n * Cannot use `{@render children(...)}` if the parent component uses `let:` directives. Consider using a named snippet instead\n * @returns {never}\n */\nexport function invalid_default_snippet() {\n\tif (DEV) {\n\t\tconst error = new Error(`invalid_default_snippet\\nCannot use \\`{@render children(...)}\\` if the parent component uses \\`let:\\` directives. Consider using a named snippet instead\\nhttps://svelte.dev/e/invalid_default_snippet`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/invalid_default_snippet`);\n\t}\n}\n\n/**\n * A snippet function was passed invalid arguments. 
Snippets should only be instantiated via `{@render ...}`\n * @returns {never}\n */\nexport function invalid_snippet_arguments() {\n\tif (DEV) {\n\t\tconst error = new Error(`invalid_snippet_arguments\\nA snippet function was passed invalid arguments. Snippets should only be instantiated via \\`{@render ...}\\`\\nhttps://svelte.dev/e/invalid_snippet_arguments`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/invalid_snippet_arguments`);\n\t}\n}\n\n/**\n * An invariant violation occurred, meaning Svelte's internal assumptions were flawed. This is a bug in Svelte, not your app — please open an issue at https://github.com/sveltejs/svelte, citing the following message: \"%message%\"\n * @param {string} message\n * @returns {never}\n */\nexport function invariant_violation(message) {\n\tif (DEV) {\n\t\tconst error = new Error(`invariant_violation\\nAn invariant violation occurred, meaning Svelte's internal assumptions were flawed. This is a bug in Svelte, not your app — please open an issue at https://github.com/sveltejs/svelte, citing the following message: \"${message}\"\\nhttps://svelte.dev/e/invariant_violation`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/invariant_violation`);\n\t}\n}\n\n/**\n * `%name%(...)` can only be used during component initialisation\n * @param {string} name\n * @returns {never}\n */\nexport function lifecycle_outside_component(name) {\n\tif (DEV) {\n\t\tconst error = new Error(`lifecycle_outside_component\\n\\`${name}(...)\\` can only be used during component initialisation\\nhttps://svelte.dev/e/lifecycle_outside_component`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/lifecycle_outside_component`);\n\t}\n}\n\n/**\n * Context was not set in a parent component\n * @returns {never}\n */\nexport function missing_context() {\n\tif (DEV) {\n\t\tconst error = new Error(`missing_context\\nContext was not set in a parent component\\nhttps://svelte.dev/e/missing_context`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/missing_context`);\n\t}\n}\n\n/**\n * Attempted to render a snippet without a `{@render}` block. This would cause the snippet code to be stringified instead of its content being rendered to the DOM. To fix this, change `{snippet}` to `{@render snippet()}`.\n * @returns {never}\n */\nexport function snippet_without_render_tag() {\n\tif (DEV) {\n\t\tconst error = new Error(`snippet_without_render_tag\\nAttempted to render a snippet without a \\`{@render}\\` block. This would cause the snippet code to be stringified instead of its content being rendered to the DOM. 
To fix this, change \\`{snippet}\\` to \\`{@render snippet()}\\`.\\nhttps://svelte.dev/e/snippet_without_render_tag`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/snippet_without_render_tag`);\n\t}\n}\n\n/**\n * `%name%` is not a store with a `subscribe` method\n * @param {string} name\n * @returns {never}\n */\nexport function store_invalid_shape(name) {\n\tif (DEV) {\n\t\tconst error = new Error(`store_invalid_shape\\n\\`${name}\\` is not a store with a \\`subscribe\\` method\\nhttps://svelte.dev/e/store_invalid_shape`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/store_invalid_shape`);\n\t}\n}\n\n/**\n * The `this` prop on `` must be a string, if defined\n * @returns {never}\n */\nexport function svelte_element_invalid_this_value() {\n\tif (DEV) {\n\t\tconst error = new Error(`svelte_element_invalid_this_value\\nThe \\`this\\` prop on \\`\\` must be a string, if defined\\nhttps://svelte.dev/e/svelte_element_invalid_this_value`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/svelte_element_invalid_this_value`);\n\t}\n}","/* This file is generated by scripts/process-messages/index.js. Do not edit! */\n\nimport { DEV } from 'esm-env';\n\nexport * from '../shared/errors.js';\n\n/**\n * Cannot create a `$derived(...)` with an `await` expression outside of an effect tree\n * @returns {never}\n */\nexport function async_derived_orphan() {\n\tif (DEV) {\n\t\tconst error = new Error(`async_derived_orphan\\nCannot create a \\`$derived(...)\\` with an \\`await\\` expression outside of an effect tree\\nhttps://svelte.dev/e/async_derived_orphan`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/async_derived_orphan`);\n\t}\n}\n\n/**\n * Using `bind:value` together with a checkbox input is not allowed. Use `bind:checked` instead\n * @returns {never}\n */\nexport function bind_invalid_checkbox_value() {\n\tif (DEV) {\n\t\tconst error = new Error(`bind_invalid_checkbox_value\\nUsing \\`bind:value\\` together with a checkbox input is not allowed. Use \\`bind:checked\\` instead\\nhttps://svelte.dev/e/bind_invalid_checkbox_value`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/bind_invalid_checkbox_value`);\n\t}\n}\n\n/**\n * Component %component% has an export named `%key%` that a consumer component is trying to access using `bind:%key%`, which is disallowed. Instead, use `bind:this` (e.g. `<%name% bind:this={component} />`) and then access the property on the bound component instance (e.g. `component.%key%`)\n * @param {string} component\n * @param {string} key\n * @param {string} name\n * @returns {never}\n */\nexport function bind_invalid_export(component, key, name) {\n\tif (DEV) {\n\t\tconst error = new Error(`bind_invalid_export\\nComponent ${component} has an export named \\`${key}\\` that a consumer component is trying to access using \\`bind:${key}\\`, which is disallowed. Instead, use \\`bind:this\\` (e.g. \\`<${name} bind:this={component} />\\`) and then access the property on the bound component instance (e.g. 
\\`component.${key}\\`)\\nhttps://svelte.dev/e/bind_invalid_export`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/bind_invalid_export`);\n\t}\n}\n\n/**\n * A component is attempting to bind to a non-bindable property `%key%` belonging to %component% (i.e. `<%name% bind:%key%={...}>`). To mark a property as bindable: `let { %key% = $bindable() } = $props()`\n * @param {string} key\n * @param {string} component\n * @param {string} name\n * @returns {never}\n */\nexport function bind_not_bindable(key, component, name) {\n\tif (DEV) {\n\t\tconst error = new Error(`bind_not_bindable\\nA component is attempting to bind to a non-bindable property \\`${key}\\` belonging to ${component} (i.e. \\`<${name} bind:${key}={...}>\\`). To mark a property as bindable: \\`let { ${key} = $bindable() } = $props()\\`\\nhttps://svelte.dev/e/bind_not_bindable`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/bind_not_bindable`);\n\t}\n}\n\n/**\n * Calling `%method%` on a component instance (of %component%) is no longer valid in Svelte 5\n * @param {string} method\n * @param {string} component\n * @returns {never}\n */\nexport function component_api_changed(method, component) {\n\tif (DEV) {\n\t\tconst error = new Error(`component_api_changed\\nCalling \\`${method}\\` on a component instance (of ${component}) is no longer valid in Svelte 5\\nhttps://svelte.dev/e/component_api_changed`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/component_api_changed`);\n\t}\n}\n\n/**\n * Attempted to instantiate %component% with `new %name%`, which is no longer valid in Svelte 5. If this component is not under your control, set the `compatibility.componentApi` compiler option to `4` to keep it working.\n * @param {string} component\n * @param {string} name\n * @returns {never}\n */\nexport function component_api_invalid_new(component, name) {\n\tif (DEV) {\n\t\tconst error = new Error(`component_api_invalid_new\\nAttempted to instantiate ${component} with \\`new ${name}\\`, which is no longer valid in Svelte 5. If this component is not under your control, set the \\`compatibility.componentApi\\` compiler option to \\`4\\` to keep it working.\\nhttps://svelte.dev/e/component_api_invalid_new`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/component_api_invalid_new`);\n\t}\n}\n\n/**\n * A derived value cannot reference itself recursively\n * @returns {never}\n */\nexport function derived_references_self() {\n\tif (DEV) {\n\t\tconst error = new Error(`derived_references_self\\nA derived value cannot reference itself recursively\\nhttps://svelte.dev/e/derived_references_self`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/derived_references_self`);\n\t}\n}\n\n/**\n * Keyed each block has duplicate key `%value%` at indexes %a% and %b%\n * @param {string} a\n * @param {string} b\n * @param {string | undefined | null} [value]\n * @returns {never}\n */\nexport function each_key_duplicate(a, b, value) {\n\tif (DEV) {\n\t\tconst error = new Error(`each_key_duplicate\\n${value\n\t\t\t? 
`Keyed each block has duplicate key \\`${value}\\` at indexes ${a} and ${b}`\n\t\t\t: `Keyed each block has duplicate key at indexes ${a} and ${b}`}\\nhttps://svelte.dev/e/each_key_duplicate`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/each_key_duplicate`);\n\t}\n}\n\n/**\n * Keyed each block has key that is not idempotent — the key for item at index %index% was `%a%` but is now `%b%`. Keys must be the same each time for a given item\n * @param {string} index\n * @param {string} a\n * @param {string} b\n * @returns {never}\n */\nexport function each_key_volatile(index, a, b) {\n\tif (DEV) {\n\t\tconst error = new Error(`each_key_volatile\\nKeyed each block has key that is not idempotent — the key for item at index ${index} was \\`${a}\\` but is now \\`${b}\\`. Keys must be the same each time for a given item\\nhttps://svelte.dev/e/each_key_volatile`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/each_key_volatile`);\n\t}\n}\n\n/**\n * `%rune%` cannot be used inside an effect cleanup function\n * @param {string} rune\n * @returns {never}\n */\nexport function effect_in_teardown(rune) {\n\tif (DEV) {\n\t\tconst error = new Error(`effect_in_teardown\\n\\`${rune}\\` cannot be used inside an effect cleanup function\\nhttps://svelte.dev/e/effect_in_teardown`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/effect_in_teardown`);\n\t}\n}\n\n/**\n * Effect cannot be created inside a `$derived` value that was not itself created inside an effect\n * @returns {never}\n */\nexport function effect_in_unowned_derived() {\n\tif (DEV) {\n\t\tconst error = new Error(`effect_in_unowned_derived\\nEffect cannot be created inside a \\`$derived\\` value that was not itself created inside an effect\\nhttps://svelte.dev/e/effect_in_unowned_derived`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/effect_in_unowned_derived`);\n\t}\n}\n\n/**\n * `%rune%` can only be used inside an effect (e.g. during component initialisation)\n * @param {string} rune\n * @returns {never}\n */\nexport function effect_orphan(rune) {\n\tif (DEV) {\n\t\tconst error = new Error(`effect_orphan\\n\\`${rune}\\` can only be used inside an effect (e.g. during component initialisation)\\nhttps://svelte.dev/e/effect_orphan`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/effect_orphan`);\n\t}\n}\n\n/**\n * `$effect.pending()` can only be called inside an effect or derived\n * @returns {never}\n */\nexport function effect_pending_outside_reaction() {\n\tif (DEV) {\n\t\tconst error = new Error(`effect_pending_outside_reaction\\n\\`$effect.pending()\\` can only be called inside an effect or derived\\nhttps://svelte.dev/e/effect_pending_outside_reaction`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/effect_pending_outside_reaction`);\n\t}\n}\n\n/**\n * Maximum update depth exceeded. This typically indicates that an effect reads and writes the same piece of state\n * @returns {never}\n */\nexport function effect_update_depth_exceeded() {\n\tif (DEV) {\n\t\tconst error = new Error(`effect_update_depth_exceeded\\nMaximum update depth exceeded. 
This typically indicates that an effect reads and writes the same piece of state\\nhttps://svelte.dev/e/effect_update_depth_exceeded`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/effect_update_depth_exceeded`);\n\t}\n}\n\n/**\n * Cannot use `flushSync` inside an effect\n * @returns {never}\n */\nexport function flush_sync_in_effect() {\n\tif (DEV) {\n\t\tconst error = new Error(`flush_sync_in_effect\\nCannot use \\`flushSync\\` inside an effect\\nhttps://svelte.dev/e/flush_sync_in_effect`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/flush_sync_in_effect`);\n\t}\n}\n\n/**\n * Cannot commit a fork that was already discarded\n * @returns {never}\n */\nexport function fork_discarded() {\n\tif (DEV) {\n\t\tconst error = new Error(`fork_discarded\\nCannot commit a fork that was already discarded\\nhttps://svelte.dev/e/fork_discarded`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/fork_discarded`);\n\t}\n}\n\n/**\n * Cannot create a fork inside an effect or when state changes are pending\n * @returns {never}\n */\nexport function fork_timing() {\n\tif (DEV) {\n\t\tconst error = new Error(`fork_timing\\nCannot create a fork inside an effect or when state changes are pending\\nhttps://svelte.dev/e/fork_timing`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/fork_timing`);\n\t}\n}\n\n/**\n * `getAbortSignal()` can only be called inside an effect or derived\n * @returns {never}\n */\nexport function get_abort_signal_outside_reaction() {\n\tif (DEV) {\n\t\tconst error = new Error(`get_abort_signal_outside_reaction\\n\\`getAbortSignal()\\` can only be called inside an effect or derived\\nhttps://svelte.dev/e/get_abort_signal_outside_reaction`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/get_abort_signal_outside_reaction`);\n\t}\n}\n\n/**\n * Expected to find a hydratable with key `%key%` during hydration, but did not.\n * @param {string} key\n * @returns {never}\n */\nexport function hydratable_missing_but_required(key) {\n\tif (DEV) {\n\t\tconst error = new Error(`hydratable_missing_but_required\\nExpected to find a hydratable with key \\`${key}\\` during hydration, but did not.\\nhttps://svelte.dev/e/hydratable_missing_but_required`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/hydratable_missing_but_required`);\n\t}\n}\n\n/**\n * Failed to hydrate the application\n * @returns {never}\n */\nexport function hydration_failed() {\n\tif (DEV) {\n\t\tconst error = new Error(`hydration_failed\\nFailed to hydrate the application\\nhttps://svelte.dev/e/hydration_failed`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/hydration_failed`);\n\t}\n}\n\n/**\n * Could not `{@render}` snippet due to the expression being `null` or `undefined`. Consider using optional chaining `{@render snippet?.()}`\n * @returns {never}\n */\nexport function invalid_snippet() {\n\tif (DEV) {\n\t\tconst error = new Error(`invalid_snippet\\nCould not \\`{@render}\\` snippet due to the expression being \\`null\\` or \\`undefined\\`. 
Consider using optional chaining \\`{@render snippet?.()}\\`\\nhttps://svelte.dev/e/invalid_snippet`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/invalid_snippet`);\n\t}\n}\n\n/**\n * `%name%(...)` cannot be used in runes mode\n * @param {string} name\n * @returns {never}\n */\nexport function lifecycle_legacy_only(name) {\n\tif (DEV) {\n\t\tconst error = new Error(`lifecycle_legacy_only\\n\\`${name}(...)\\` cannot be used in runes mode\\nhttps://svelte.dev/e/lifecycle_legacy_only`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/lifecycle_legacy_only`);\n\t}\n}\n\n/**\n * Cannot do `bind:%key%={undefined}` when `%key%` has a fallback value\n * @param {string} key\n * @returns {never}\n */\nexport function props_invalid_value(key) {\n\tif (DEV) {\n\t\tconst error = new Error(`props_invalid_value\\nCannot do \\`bind:${key}={undefined}\\` when \\`${key}\\` has a fallback value\\nhttps://svelte.dev/e/props_invalid_value`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/props_invalid_value`);\n\t}\n}\n\n/**\n * Rest element properties of `$props()` such as `%property%` are readonly\n * @param {string} property\n * @returns {never}\n */\nexport function props_rest_readonly(property) {\n\tif (DEV) {\n\t\tconst error = new Error(`props_rest_readonly\\nRest element properties of \\`$props()\\` such as \\`${property}\\` are readonly\\nhttps://svelte.dev/e/props_rest_readonly`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/props_rest_readonly`);\n\t}\n}\n\n/**\n * The `%rune%` rune is only available inside `.svelte` and `.svelte.js/ts` files\n * @param {string} rune\n * @returns {never}\n */\nexport function rune_outside_svelte(rune) {\n\tif (DEV) {\n\t\tconst error = new Error(`rune_outside_svelte\\nThe \\`${rune}\\` rune is only available inside \\`.svelte\\` and \\`.svelte.js/ts\\` files\\nhttps://svelte.dev/e/rune_outside_svelte`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/rune_outside_svelte`);\n\t}\n}\n\n/**\n * `setContext` must be called when a component first initializes, not in a subsequent effect or after an `await` expression\n * @returns {never}\n */\nexport function set_context_after_init() {\n\tif (DEV) {\n\t\tconst error = new Error(`set_context_after_init\\n\\`setContext\\` must be called when a component first initializes, not in a subsequent effect or after an \\`await\\` expression\\nhttps://svelte.dev/e/set_context_after_init`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/set_context_after_init`);\n\t}\n}\n\n/**\n * Property descriptors defined on `$state` objects must contain `value` and always be `enumerable`, `configurable` and `writable`.\n * @returns {never}\n */\nexport function state_descriptors_fixed() {\n\tif (DEV) {\n\t\tconst error = new Error(`state_descriptors_fixed\\nProperty descriptors defined on \\`$state\\` objects must contain \\`value\\` and always be \\`enumerable\\`, \\`configurable\\` and \\`writable\\`.\\nhttps://svelte.dev/e/state_descriptors_fixed`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/state_descriptors_fixed`);\n\t}\n}\n\n/**\n * Cannot set prototype of `$state` object\n * @returns 
{never}\n */\nexport function state_prototype_fixed() {\n\tif (DEV) {\n\t\tconst error = new Error(`state_prototype_fixed\\nCannot set prototype of \\`$state\\` object\\nhttps://svelte.dev/e/state_prototype_fixed`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/state_prototype_fixed`);\n\t}\n}\n\n/**\n * Updating state inside `$derived(...)`, `$inspect(...)` or a template expression is forbidden. If the value should not be reactive, declare it without `$state`\n * @returns {never}\n */\nexport function state_unsafe_mutation() {\n\tif (DEV) {\n\t\tconst error = new Error(`state_unsafe_mutation\\nUpdating state inside \\`$derived(...)\\`, \\`$inspect(...)\\` or a template expression is forbidden. If the value should not be reactive, declare it without \\`$state\\`\\nhttps://svelte.dev/e/state_unsafe_mutation`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/state_unsafe_mutation`);\n\t}\n}\n\n/**\n * A `<svelte:boundary>` `reset` function cannot be called while an error is still being handled\n * @returns {never}\n */\nexport function svelte_boundary_reset_onerror() {\n\tif (DEV) {\n\t\tconst error = new Error(`svelte_boundary_reset_onerror\\nA \\`<svelte:boundary>\\` \\`reset\\` function cannot be called while an error is still being handled\\nhttps://svelte.dev/e/svelte_boundary_reset_onerror`);\n\n\t\terror.name = 'Svelte error';\n\n\t\tthrow error;\n\t} else {\n\t\tthrow new Error(`https://svelte.dev/e/svelte_boundary_reset_onerror`);\n\t}\n}","/* This file is generated by scripts/process-messages/index.js. Do not edit! */\n\nimport { DEV } from 'esm-env';\n\nvar bold = 'font-weight: bold';\nvar normal = 'font-weight: normal';\n\n/**\n * Assignment to `%property%` property (%location%) will evaluate to the right-hand side, not the value of `%property%` following the assignment. This may result in unexpected behaviour.\n * @param {string} property\n * @param {string} location\n */\nexport function assignment_value_stale(property, location) {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] assignment_value_stale\\n%cAssignment to \\`${property}\\` property (${location}) will evaluate to the right-hand side, not the value of \\`${property}\\` following the assignment. This may result in unexpected behaviour.\\nhttps://svelte.dev/e/assignment_value_stale`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/assignment_value_stale`);\n\t}\n}\n\n/**\n * Detected reactivity loss when reading `%name%`. This happens when state is read in an async function after an earlier `await`\n * @param {string} name\n */\nexport function await_reactivity_loss(name) {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] await_reactivity_loss\\n%cDetected reactivity loss when reading \\`${name}\\`. This happens when state is read in an async function after an earlier \\`await\\`\\nhttps://svelte.dev/e/await_reactivity_loss`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/await_reactivity_loss`);\n\t}\n}\n\n/**\n * An async derived, `%name%` (%location%) was not read immediately after it resolved. This often indicates an unnecessary waterfall, which can slow down your app\n * @param {string} name\n * @param {string} location\n */\nexport function await_waterfall(name, location) {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] await_waterfall\\n%cAn async derived, \\`${name}\\` (${location}) was not read immediately after it resolved. 
This often indicates an unnecessary waterfall, which can slow down your app\\nhttps://svelte.dev/e/await_waterfall`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/await_waterfall`);\n\t}\n}\n\n/**\n * `%binding%` (%location%) is binding to a non-reactive property\n * @param {string} binding\n * @param {string | undefined | null} [location]\n */\nexport function binding_property_non_reactive(binding, location) {\n\tif (DEV) {\n\t\tconsole.warn(\n\t\t\t`%c[svelte] binding_property_non_reactive\\n%c${location\n\t\t\t\t? `\\`${binding}\\` (${location}) is binding to a non-reactive property`\n\t\t\t\t: `\\`${binding}\\` is binding to a non-reactive property`}\\nhttps://svelte.dev/e/binding_property_non_reactive`,\n\t\t\tbold,\n\t\t\tnormal\n\t\t);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/binding_property_non_reactive`);\n\t}\n}\n\n/**\n * Your `console.%method%` contained `$state` proxies. Consider using `$inspect(...)` or `$state.snapshot(...)` instead\n * @param {string} method\n */\nexport function console_log_state(method) {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] console_log_state\\n%cYour \\`console.${method}\\` contained \\`$state\\` proxies. Consider using \\`$inspect(...)\\` or \\`$state.snapshot(...)\\` instead\\nhttps://svelte.dev/e/console_log_state`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/console_log_state`);\n\t}\n}\n\n/**\n * Reading a derived belonging to a now-destroyed effect may result in stale values\n */\nexport function derived_inert() {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] derived_inert\\n%cReading a derived belonging to a now-destroyed effect may result in stale values\\nhttps://svelte.dev/e/derived_inert`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/derived_inert`);\n\t}\n}\n\n/**\n * %handler% should be a function. Did you mean to %suggestion%?\n * @param {string} handler\n * @param {string} suggestion\n */\nexport function event_handler_invalid(handler, suggestion) {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] event_handler_invalid\\n%c${handler} should be a function. Did you mean to ${suggestion}?\\nhttps://svelte.dev/e/event_handler_invalid`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/event_handler_invalid`);\n\t}\n}\n\n/**\n * Expected to find a hydratable with key `%key%` during hydration, but did not.\n * @param {string} key\n */\nexport function hydratable_missing_but_expected(key) {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] hydratable_missing_but_expected\\n%cExpected to find a hydratable with key \\`${key}\\` during hydration, but did not.\\nhttps://svelte.dev/e/hydratable_missing_but_expected`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/hydratable_missing_but_expected`);\n\t}\n}\n\n/**\n * The `%attribute%` attribute on `%html%` changed its value between server and client renders. The client value, `%value%`, will be ignored in favour of the server value\n * @param {string} attribute\n * @param {string} html\n * @param {string} value\n */\nexport function hydration_attribute_changed(attribute, html, value) {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] hydration_attribute_changed\\n%cThe \\`${attribute}\\` attribute on \\`${html}\\` changed its value between server and client renders. 
The client value, \\`${value}\\`, will be ignored in favour of the server value\\nhttps://svelte.dev/e/hydration_attribute_changed`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/hydration_attribute_changed`);\n\t}\n}\n\n/**\n * The value of an `{@html ...}` block %location% changed between server and client renders. The client value will be ignored in favour of the server value\n * @param {string | undefined | null} [location]\n */\nexport function hydration_html_changed(location) {\n\tif (DEV) {\n\t\tconsole.warn(\n\t\t\t`%c[svelte] hydration_html_changed\\n%c${location\n\t\t\t\t? `The value of an \\`{@html ...}\\` block ${location} changed between server and client renders. The client value will be ignored in favour of the server value`\n\t\t\t\t: 'The value of an `{@html ...}` block changed between server and client renders. The client value will be ignored in favour of the server value'}\\nhttps://svelte.dev/e/hydration_html_changed`,\n\t\t\tbold,\n\t\t\tnormal\n\t\t);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/hydration_html_changed`);\n\t}\n}\n\n/**\n * Hydration failed because the initial UI does not match what was rendered on the server. The error occurred near %location%\n * @param {string | undefined | null} [location]\n */\nexport function hydration_mismatch(location) {\n\tif (DEV) {\n\t\tconsole.warn(\n\t\t\t`%c[svelte] hydration_mismatch\\n%c${location\n\t\t\t\t? `Hydration failed because the initial UI does not match what was rendered on the server. The error occurred near ${location}`\n\t\t\t\t: 'Hydration failed because the initial UI does not match what was rendered on the server'}\\nhttps://svelte.dev/e/hydration_mismatch`,\n\t\t\tbold,\n\t\t\tnormal\n\t\t);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/hydration_mismatch`);\n\t}\n}\n\n/**\n * The `render` function passed to `createRawSnippet` should return HTML for a single element\n */\nexport function invalid_raw_snippet_render() {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] invalid_raw_snippet_render\\n%cThe \\`render\\` function passed to \\`createRawSnippet\\` should return HTML for a single element\\nhttps://svelte.dev/e/invalid_raw_snippet_render`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/invalid_raw_snippet_render`);\n\t}\n}\n\n/**\n * Detected a migrated `$:` reactive block in `%filename%` that both accesses and updates the same reactive value. This may cause recursive updates when converted to an `$effect`.\n * @param {string} filename\n */\nexport function legacy_recursive_reactive_block(filename) {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] legacy_recursive_reactive_block\\n%cDetected a migrated \\`$:\\` reactive block in \\`${filename}\\` that both accesses and updates the same reactive value. This may cause recursive updates when converted to an \\`$effect\\`.\\nhttps://svelte.dev/e/legacy_recursive_reactive_block`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/legacy_recursive_reactive_block`);\n\t}\n}\n\n/**\n * Tried to unmount a component that was not mounted\n */\nexport function lifecycle_double_unmount() {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] lifecycle_double_unmount\\n%cTried to unmount a component that was not mounted\\nhttps://svelte.dev/e/lifecycle_double_unmount`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/lifecycle_double_unmount`);\n\t}\n}\n\n/**\n * %parent% passed property `%prop%` to %child% with `bind:`, but its parent component %owner% did not declare `%prop%` as a binding. 
Consider creating a binding between %owner% and %parent% (e.g. `bind:%prop%={...}` instead of `%prop%={...}`)\n * @param {string} parent\n * @param {string} prop\n * @param {string} child\n * @param {string} owner\n */\nexport function ownership_invalid_binding(parent, prop, child, owner) {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] ownership_invalid_binding\\n%c${parent} passed property \\`${prop}\\` to ${child} with \\`bind:\\`, but its parent component ${owner} did not declare \\`${prop}\\` as a binding. Consider creating a binding between ${owner} and ${parent} (e.g. \\`bind:${prop}={...}\\` instead of \\`${prop}={...}\\`)\\nhttps://svelte.dev/e/ownership_invalid_binding`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/ownership_invalid_binding`);\n\t}\n}\n\n/**\n * Mutating unbound props (`%name%`, at %location%) is strongly discouraged. Consider using `bind:%prop%={...}` in %parent% (or using a callback) instead\n * @param {string} name\n * @param {string} location\n * @param {string} prop\n * @param {string} parent\n */\nexport function ownership_invalid_mutation(name, location, prop, parent) {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] ownership_invalid_mutation\\n%cMutating unbound props (\\`${name}\\`, at ${location}) is strongly discouraged. Consider using \\`bind:${prop}={...}\\` in ${parent} (or using a callback) instead\\nhttps://svelte.dev/e/ownership_invalid_mutation`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/ownership_invalid_mutation`);\n\t}\n}\n\n/**\n * The `value` property of a `<select multiple>` element should be an array, but it received a non-array value. The selection will be kept as is.\n */\nexport function select_multiple_invalid_value() {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] select_multiple_invalid_value\\n%cThe \\`value\\` property of a \\`<select multiple>\\` element should be an array, but it received a non-array value. The selection will be kept as is.\\nhttps://svelte.dev/e/select_multiple_invalid_value`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/select_multiple_invalid_value`);\n\t}\n}\n\n/**\n * Reactive `$state(...)` proxies and the values they proxy have different identities. Because of this, comparisons with `%operator%` will produce unexpected results\n * @param {string} operator\n */\nexport function state_proxy_equality_mismatch(operator) {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] state_proxy_equality_mismatch\\n%cReactive \\`$state(...)\\` proxies and the values they proxy have different identities. 
Because of this, comparisons with \`${operator}\` will produce unexpected results\\nhttps://svelte.dev/e/state_proxy_equality_mismatch`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/state_proxy_equality_mismatch`);\n\t}\n}\n\n/**\n * Tried to unmount a state proxy, rather than a component\n */\nexport function state_proxy_unmount() {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] state_proxy_unmount\\n%cTried to unmount a state proxy, rather than a component\\nhttps://svelte.dev/e/state_proxy_unmount`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/state_proxy_unmount`);\n\t}\n}\n\n/**\n * A `<svelte:boundary>` `reset` function only resets the boundary the first time it is called\n */\nexport function svelte_boundary_reset_noop() {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] svelte_boundary_reset_noop\\n%cA \\`<svelte:boundary>\\` \\`reset\\` function only resets the boundary the first time it is called\\nhttps://svelte.dev/e/svelte_boundary_reset_noop`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/svelte_boundary_reset_noop`);\n\t}\n}\n\n/**\n * The `slide` transition does not work correctly for elements with `display: %value%`\n * @param {string} value\n */\nexport function transition_slide_display(value) {\n\tif (DEV) {\n\t\tconsole.warn(`%c[svelte] transition_slide_display\\n%cThe \\`slide\\` transition does not work correctly for elements with \\`display: ${value}\\`\\nhttps://svelte.dev/e/transition_slide_display`, bold, normal);\n\t} else {\n\t\tconsole.warn(`https://svelte.dev/e/transition_slide_display`);\n\t}\n}","/** @import { TemplateNode } from '#client' */\n\nimport { COMMENT_NODE } from '#client/constants';\nimport {\n\tHYDRATION_END,\n\tHYDRATION_ERROR,\n\tHYDRATION_START,\n\tHYDRATION_START_ELSE\n} from '../../../constants.js';\nimport * as w from '../warnings.js';\nimport { get_next_sibling } from './operations.js';\n\n/**\n * Use this variable to guard everything related to hydration code so it can be treeshaken out\n * if the user doesn't use the `hydrate` method and these code paths are therefore not needed.\n */\nexport let hydrating = false;\n\n/** @param {boolean} value */\nexport function set_hydrating(value) {\n\thydrating = value;\n}\n\n/**\n * The node that is currently being hydrated. This starts out as the first node inside the opening\n * comment, and updates each time a component calls `$.child(...)` or `$.sibling(...)`.\n * When entering a block (e.g. 
`{#if ...}`), `hydrate_node` is the block opening comment; by the\n * time we leave the block it is the closing comment, which serves as the block's anchor.\n * @type {TemplateNode}\n */\nexport let hydrate_node;\n\n/** @param {TemplateNode | null} node */\nexport function set_hydrate_node(node) {\n\tif (node === null) {\n\t\tw.hydration_mismatch();\n\t\tthrow HYDRATION_ERROR;\n\t}\n\n\treturn (hydrate_node = node);\n}\n\nexport function hydrate_next() {\n\treturn set_hydrate_node(get_next_sibling(hydrate_node));\n}\n\n/** @param {TemplateNode} node */\nexport function reset(node) {\n\tif (!hydrating) return;\n\n\t// If the node has remaining siblings, something has gone wrong\n\tif (get_next_sibling(hydrate_node) !== null) {\n\t\tw.hydration_mismatch();\n\t\tthrow HYDRATION_ERROR;\n\t}\n\n\thydrate_node = node;\n}\n\n/**\n * @param {HTMLTemplateElement} template\n */\nexport function hydrate_template(template) {\n\tif (hydrating) {\n\t\t// @ts-expect-error TemplateNode doesn't include DocumentFragment, but it's actually fine\n\t\thydrate_node = template.content;\n\t}\n}\n\nexport function next(count = 1) {\n\tif (hydrating) {\n\t\tvar i = count;\n\t\tvar node = hydrate_node;\n\n\t\twhile (i--) {\n\t\t\tnode = /** @type {TemplateNode} */ (get_next_sibling(node));\n\t\t}\n\n\t\thydrate_node = node;\n\t}\n}\n\n/**\n * Skips or removes (depending on {@link remove}) all nodes starting at `hydrate_node` up until the next hydration end comment\n * @param {boolean} remove\n */\nexport function skip_nodes(remove = true) {\n\tvar depth = 0;\n\tvar node = hydrate_node;\n\n\twhile (true) {\n\t\tif (node.nodeType === COMMENT_NODE) {\n\t\t\tvar data = /** @type {Comment} */ (node).data;\n\n\t\t\tif (data === HYDRATION_END) {\n\t\t\t\tif (depth === 0) return node;\n\t\t\t\tdepth -= 1;\n\t\t\t} else if (\n\t\t\t\tdata === HYDRATION_START ||\n\t\t\t\tdata === HYDRATION_START_ELSE ||\n\t\t\t\t// \"[1\", \"[2\", etc. for if blocks\n\t\t\t\t(data[0] === '[' && !isNaN(Number(data.slice(1))))\n\t\t\t) {\n\t\t\t\tdepth += 1;\n\t\t\t}\n\t\t}\n\n\t\tvar next = /** @type {TemplateNode} */ (get_next_sibling(node));\n\t\tif (remove) node.remove();\n\t\tnode = next;\n\t}\n}\n\n/**\n *\n * @param {TemplateNode} node\n */\nexport function read_hydration_instruction(node) {\n\tif (!node || node.nodeType !== COMMENT_NODE) {\n\t\tw.hydration_mismatch();\n\t\tthrow HYDRATION_ERROR;\n\t}\n\n\treturn /** @type {Comment} */ (node).data;\n}\n","/** @import { Equals } from '#client' */\n\n/** @type {Equals} */\nexport function equals(value) {\n\treturn value === this.v;\n}\n\n/**\n * @param {unknown} a\n * @param {unknown} b\n * @returns {boolean}\n */\nexport function safe_not_equal(a, b) {\n\treturn a != a\n\t\t? 
b == b\n\t\t: a !== b || (a !== null && typeof a === 'object') || typeof a === 'function';\n}\n\n/**\n * @param {unknown} a\n * @param {unknown} b\n * @returns {boolean}\n */\nexport function not_equal(a, b) {\n\treturn a !== b;\n}\n\n/** @type {Equals} */\nexport function safe_equals(value) {\n\treturn !safe_not_equal(value, this.v);\n}\n","/** True if experimental.async=true */\nexport let async_mode_flag = false;\n/** True if we're not certain that we only have Svelte 5 code in the compilation */\nexport let legacy_mode_flag = false;\n/** True if $inspect.trace is used */\nexport let tracing_mode_flag = false;\n\nexport function enable_async_mode_flag() {\n\tasync_mode_flag = true;\n}\n\n/** ONLY USE THIS DURING TESTING */\nexport function disable_async_mode_flag() {\n\tasync_mode_flag = false;\n}\n\nexport function enable_legacy_mode_flag() {\n\tlegacy_mode_flag = true;\n}\n\nexport function enable_tracing_mode_flag() {\n\ttracing_mode_flag = true;\n}\n","/** @import { ComponentContext, DevStackEntry, Effect } from '#client' */\nimport { DEV } from 'esm-env';\nimport * as e from './errors.js';\nimport { active_effect, active_reaction } from './runtime.js';\nimport { create_user_effect } from './reactivity/effects.js';\nimport { async_mode_flag, legacy_mode_flag } from '../flags/index.js';\nimport { FILENAME } from '../../constants.js';\nimport { BRANCH_EFFECT } from './constants.js';\n\n/** @type {ComponentContext | null} */\nexport let component_context = null;\n\n/** @param {ComponentContext | null} context */\nexport function set_component_context(context) {\n\tcomponent_context = context;\n}\n\n/** @type {DevStackEntry | null} */\nexport let dev_stack = null;\n\n/** @param {DevStackEntry | null} stack */\nexport function set_dev_stack(stack) {\n\tdev_stack = stack;\n}\n\n/**\n * Execute a callback with a new dev stack entry\n * @param {() => any} callback - Function to execute\n * @param {DevStackEntry['type']} type - Type of block/component\n * @param {any} component - Component function\n * @param {number} line - Line number\n * @param {number} column - Column number\n * @param {Record} [additional] - Any additional properties to add to the dev stack entry\n * @returns {any}\n */\nexport function add_svelte_meta(callback, type, component, line, column, additional) {\n\tconst parent = dev_stack;\n\n\tdev_stack = {\n\t\ttype,\n\t\tfile: component[FILENAME],\n\t\tline,\n\t\tcolumn,\n\t\tparent,\n\t\t...additional\n\t};\n\n\ttry {\n\t\treturn callback();\n\t} finally {\n\t\tdev_stack = parent;\n\t}\n}\n\n/**\n * The current component function. 
Different from current component context:\n * ```html\n * \n * \n * \n * \n * ```\n * @type {ComponentContext['function']}\n */\nexport let dev_current_component_function = null;\n\n/** @param {ComponentContext['function']} fn */\nexport function set_dev_current_component_function(fn) {\n\tdev_current_component_function = fn;\n}\n\n/**\n * Returns a `[get, set]` pair of functions for working with context in a type-safe way.\n *\n * `get` will throw an error if no parent component called `set`.\n *\n * @template T\n * @returns {[() => T, (context: T) => T]}\n * @since 5.40.0\n */\nexport function createContext() {\n\tconst key = {};\n\n\treturn [\n\t\t() => {\n\t\t\tif (!hasContext(key)) {\n\t\t\t\te.missing_context();\n\t\t\t}\n\n\t\t\treturn getContext(key);\n\t\t},\n\t\t(context) => setContext(key, context)\n\t];\n}\n\n/**\n * Retrieves the context that belongs to the closest parent component with the specified `key`.\n * Must be called during component initialisation.\n *\n * [`createContext`](https://svelte.dev/docs/svelte/svelte#createContext) is a type-safe alternative.\n *\n * @template T\n * @param {any} key\n * @returns {T}\n */\nexport function getContext(key) {\n\tconst context_map = get_or_init_context_map('getContext');\n\tconst result = /** @type {T} */ (context_map.get(key));\n\treturn result;\n}\n\n/**\n * Associates an arbitrary `context` object with the current component and the specified `key`\n * and returns that object. The context is then available to children of the component\n * (including slotted content) with `getContext`.\n *\n * Like lifecycle functions, this must be called during component initialisation.\n *\n * [`createContext`](https://svelte.dev/docs/svelte/svelte#createContext) is a type-safe alternative.\n *\n * @template T\n * @param {any} key\n * @param {T} context\n * @returns {T}\n */\nexport function setContext(key, context) {\n\tconst context_map = get_or_init_context_map('setContext');\n\n\tif (async_mode_flag) {\n\t\tvar flags = /** @type {Effect} */ (active_effect).f;\n\t\tvar valid =\n\t\t\t!active_reaction &&\n\t\t\t(flags & BRANCH_EFFECT) !== 0 &&\n\t\t\t// pop() runs synchronously, so this indicates we're setting context after an await\n\t\t\t!(/** @type {ComponentContext} */ (component_context).i);\n\n\t\tif (!valid) {\n\t\t\te.set_context_after_init();\n\t\t}\n\t}\n\n\tcontext_map.set(key, context);\n\treturn context;\n}\n\n/**\n * Checks whether a given `key` has been set in the context of a parent component.\n * Must be called during component initialisation.\n *\n * @param {any} key\n * @returns {boolean}\n */\nexport function hasContext(key) {\n\tconst context_map = get_or_init_context_map('hasContext');\n\treturn context_map.has(key);\n}\n\n/**\n * Retrieves the whole context map that belongs to the closest parent component.\n * Must be called during component initialisation. 
Useful, for example, if you\n * programmatically create a component and want to pass the existing context to it.\n *\n * @template {Map} [T=Map]\n * @returns {T}\n */\nexport function getAllContexts() {\n\tconst context_map = get_or_init_context_map('getAllContexts');\n\treturn /** @type {T} */ (context_map);\n}\n\n/**\n * @param {Record} props\n * @param {any} runes\n * @param {Function} [fn]\n * @returns {void}\n */\nexport function push(props, runes = false, fn) {\n\tcomponent_context = {\n\t\tp: component_context,\n\t\ti: false,\n\t\tc: null,\n\t\te: null,\n\t\ts: props,\n\t\tx: null,\n\t\tr: /** @type {Effect} */ (active_effect),\n\t\tl: legacy_mode_flag && !runes ? { s: null, u: null, $: [] } : null\n\t};\n\n\tif (DEV) {\n\t\t// component function\n\t\tcomponent_context.function = fn;\n\t\tdev_current_component_function = fn;\n\t}\n}\n\n/**\n * @template {Record} T\n * @param {T} [component]\n * @returns {T}\n */\nexport function pop(component) {\n\tvar context = /** @type {ComponentContext} */ (component_context);\n\tvar effects = context.e;\n\n\tif (effects !== null) {\n\t\tcontext.e = null;\n\n\t\tfor (var fn of effects) {\n\t\t\tcreate_user_effect(fn);\n\t\t}\n\t}\n\n\tif (component !== undefined) {\n\t\tcontext.x = component;\n\t}\n\n\tcontext.i = true;\n\n\tcomponent_context = context.p;\n\n\tif (DEV) {\n\t\tdev_current_component_function = component_context?.function ?? null;\n\t}\n\n\treturn component ?? /** @type {T} */ ({});\n}\n\n/** @returns {boolean} */\nexport function is_runes() {\n\treturn !legacy_mode_flag || (component_context !== null && component_context.l === null);\n}\n\n/**\n * @param {string} name\n * @returns {Map}\n */\nfunction get_or_init_context_map(name) {\n\tif (component_context === null) {\n\t\te.lifecycle_outside_component(name);\n\t}\n\n\treturn (component_context.c ??= new Map(get_parent_context(component_context) || undefined));\n}\n\n/**\n * @param {ComponentContext} component_context\n * @returns {Map | null}\n */\nfunction get_parent_context(component_context) {\n\tlet parent = component_context.p;\n\twhile (parent !== null) {\n\t\tconst context_map = parent.c;\n\t\tif (context_map !== null) {\n\t\t\treturn context_map;\n\t\t}\n\t\tparent = parent.p;\n\t}\n\treturn null;\n}\n","import { run_all } from '../../shared/utils.js';\nimport { is_flushing_sync } from '../reactivity/batch.js';\n\n/** @type {Array<() => void>} */\nlet micro_tasks = [];\n\nfunction run_micro_tasks() {\n\tvar tasks = micro_tasks;\n\tmicro_tasks = [];\n\trun_all(tasks);\n}\n\n/**\n * @param {() => void} fn\n */\nexport function queue_micro_task(fn) {\n\tif (micro_tasks.length === 0 && !is_flushing_sync) {\n\t\tvar tasks = micro_tasks;\n\t\tqueueMicrotask(() => {\n\t\t\t// If this is false, a flushSync happened in the meantime. 
Do _not_ run new scheduled microtasks in that case\n\t\t\t// as the ordering of microtasks would be broken at that point - consider this case:\n\t\t\t// - queue_micro_task schedules microtask A to flush task X\n\t\t\t// - synchronously after, flushSync runs, processing task X\n\t\t\t// - synchronously after, some other microtask B is scheduled, but not through queue_micro_task but for example a Promise.resolve() in user code\n\t\t\t// - synchronously after, queue_micro_task schedules microtask C to flush task Y\n\t\t\t// - one tick later, microtask A now resolves, flushing task Y before microtask B, which is incorrect\n\t\t\t// This if check prevents that race condition (that realistically will only happen in tests)\n\t\t\tif (tasks === micro_tasks) run_micro_tasks();\n\t\t});\n\t}\n\n\tmicro_tasks.push(fn);\n}\n\n/**\n * Synchronously run any queued tasks.\n */\nexport function flush_tasks() {\n\twhile (micro_tasks.length > 0) {\n\t\trun_micro_tasks();\n\t}\n}\n","/** @import { Derived, Effect } from '#client' */\n/** @import { Boundary } from './dom/blocks/boundary.js' */\nimport { DEV } from 'esm-env';\nimport { FILENAME } from '../../constants.js';\nimport { is_firefox } from './dom/operations.js';\nimport { ERROR_VALUE, BOUNDARY_EFFECT, REACTION_RAN, EFFECT } from './constants.js';\nimport { define_property, get_descriptor } from '../shared/utils.js';\nimport { active_effect, active_reaction } from './runtime.js';\n\nconst adjustments = new WeakMap();\n\n/**\n * @param {unknown} error\n */\nexport function handle_error(error) {\n\tvar effect = active_effect;\n\n\t// for unowned deriveds, don't throw until we read the value\n\tif (effect === null) {\n\t\t/** @type {Derived} */ (active_reaction).f |= ERROR_VALUE;\n\t\treturn error;\n\t}\n\n\tif (DEV && error instanceof Error && !adjustments.has(error)) {\n\t\tadjustments.set(error, get_adjustments(error, effect));\n\t}\n\n\t// if the error occurred while creating this subtree, we let it\n\t// bubble up until it hits a boundary that can handle it, unless\n\t// it's an $effect in which case it doesn't run immediately\n\tif ((effect.f & REACTION_RAN) === 0 && (effect.f & EFFECT) === 0) {\n\t\tif (DEV && !effect.parent && error instanceof Error) {\n\t\t\tapply_adjustments(error);\n\t\t}\n\n\t\tthrow error;\n\t}\n\n\t// otherwise we bubble up the effect tree ourselves\n\tinvoke_error_boundary(error, effect);\n}\n\n/**\n * @param {unknown} error\n * @param {Effect | null} effect\n */\nexport function invoke_error_boundary(error, effect) {\n\twhile (effect !== null) {\n\t\tif ((effect.f & BOUNDARY_EFFECT) !== 0) {\n\t\t\tif ((effect.f & REACTION_RAN) === 0) {\n\t\t\t\t// we are still creating the boundary effect\n\t\t\t\tthrow error;\n\t\t\t}\n\n\t\t\ttry {\n\t\t\t\t/** @type {Boundary} */ (effect.b).error(error);\n\t\t\t\treturn;\n\t\t\t} catch (e) {\n\t\t\t\terror = e;\n\t\t\t}\n\t\t}\n\n\t\teffect = effect.parent;\n\t}\n\n\tif (DEV && error instanceof Error) {\n\t\tapply_adjustments(error);\n\t}\n\n\tthrow error;\n}\n\n/**\n * Add useful information to the error message/stack in development\n * @param {Error} error\n * @param {Effect} effect\n */\nfunction get_adjustments(error, effect) {\n\tconst message_descriptor = get_descriptor(error, 'message');\n\n\t// if the message was already changed and it's not configurable we can't change it\n\t// or it will throw a different error swallowing the original error\n\tif (message_descriptor && !message_descriptor.configurable) return;\n\n\tvar indent = is_firefox ? 
' ' : '\\t';\n\tvar component_stack = `\\n${indent}in ${effect.fn?.name || ''}`;\n\tvar context = effect.ctx;\n\n\twhile (context !== null) {\n\t\tcomponent_stack += `\\n${indent}in ${context.function?.[FILENAME].split('/').pop()}`;\n\t\tcontext = context.p;\n\t}\n\n\treturn {\n\t\tmessage: error.message + `\\n${component_stack}\\n`,\n\t\tstack: error.stack\n\t\t\t?.split('\\n')\n\t\t\t.filter((line) => !line.includes('svelte/src/internal'))\n\t\t\t.join('\\n')\n\t};\n}\n\n/**\n * @param {Error} error\n */\nfunction apply_adjustments(error) {\n\tconst adjusted = adjustments.get(error);\n\n\tif (adjusted) {\n\t\tdefine_property(error, 'message', {\n\t\t\tvalue: adjusted.message\n\t\t});\n\n\t\tdefine_property(error, 'stack', {\n\t\t\tvalue: adjusted.stack\n\t\t});\n\t}\n}\n","/** @import { Derived, Signal } from '#client' */\nimport { CLEAN, CONNECTED, DIRTY, MAYBE_DIRTY } from '#client/constants';\n\nconst STATUS_MASK = ~(DIRTY | MAYBE_DIRTY | CLEAN);\n\n/**\n * @param {Signal} signal\n * @param {number} status\n */\nexport function set_signal_status(signal, status) {\n\tsignal.f = (signal.f & STATUS_MASK) | status;\n}\n\n/**\n * Set a derived's status to CLEAN or MAYBE_DIRTY based on its connection state.\n * @param {Derived} derived\n */\nexport function update_derived_status(derived) {\n\t// Only mark as MAYBE_DIRTY if disconnected and has dependencies.\n\tif ((derived.f & CONNECTED) !== 0 || derived.deps === null) {\n\t\tset_signal_status(derived, CLEAN);\n\t} else {\n\t\tset_signal_status(derived, MAYBE_DIRTY);\n\t}\n}\n","/** @import { Derived, Effect, Value } from '#client' */\nimport { CLEAN, DERIVED, DIRTY, MAYBE_DIRTY, WAS_MARKED } from '#client/constants';\nimport { set_signal_status } from './status.js';\n\n/**\n * @param {Value[] | null} deps\n */\nfunction clear_marked(deps) {\n\tif (deps === null) return;\n\n\tfor (const dep of deps) {\n\t\tif ((dep.f & DERIVED) === 0 || (dep.f & WAS_MARKED) === 0) {\n\t\t\tcontinue;\n\t\t}\n\n\t\tdep.f ^= WAS_MARKED;\n\n\t\tclear_marked(/** @type {Derived} */ (dep).deps);\n\t}\n}\n\n/**\n * @param {Effect} effect\n * @param {Set} dirty_effects\n * @param {Set} maybe_dirty_effects\n */\nexport function defer_effect(effect, dirty_effects, maybe_dirty_effects) {\n\tif ((effect.f & DIRTY) !== 0) {\n\t\tdirty_effects.add(effect);\n\t} else if ((effect.f & MAYBE_DIRTY) !== 0) {\n\t\tmaybe_dirty_effects.add(effect);\n\t}\n\n\t// Since we're not executing these effects now, we need to clear any WAS_MARKED flags\n\t// so that other batches can correctly reach these effects during their own traversal\n\tclear_marked(effect.deps);\n\n\t// mark as clean so they get scheduled if they depend on pending async state\n\tset_signal_status(effect, CLEAN);\n}\n","/** @import { Readable } from './public' */\nimport { untrack } from '../internal/client/runtime.js';\nimport { noop } from '../internal/shared/utils.js';\n\n/**\n * @template T\n * @param {Readable | null | undefined} store\n * @param {(value: T) => void} run\n * @param {(value: T) => void} [invalidate]\n * @returns {() => void}\n */\nexport function subscribe_to_store(store, run, invalidate) {\n\tif (store == null) {\n\t\t// @ts-expect-error\n\t\trun(undefined);\n\n\t\t// @ts-expect-error\n\t\tif (invalidate) invalidate(undefined);\n\n\t\treturn noop;\n\t}\n\n\t// Svelte store takes a private second argument\n\t// StartStopNotifier could mutate state, and we want to silence the corresponding validation error\n\tconst unsub = untrack(() =>\n\t\tstore.subscribe(\n\t\t\trun,\n\t\t\t// 
@ts-expect-error\n\t\t\tinvalidate\n\t\t)\n\t);\n\n\t// Also support RxJS\n\t// @ts-expect-error TODO fix this in the types?\n\treturn unsub.unsubscribe ? () => unsub.unsubscribe() : unsub;\n}\n","/** @import { Readable, StartStopNotifier, Subscriber, Unsubscriber, Updater, Writable } from '../public.js' */\n/** @import { Stores, StoresValues, SubscribeInvalidateTuple } from '../private.js' */\nimport { noop, run_all } from '../../internal/shared/utils.js';\nimport { safe_not_equal } from '../../internal/client/reactivity/equality.js';\nimport { subscribe_to_store } from '../utils.js';\n\n/**\n * @type {Array<SubscribeInvalidateTuple<any> | any>}\n */\nconst subscriber_queue = [];\n\n/**\n * Creates a `Readable` store that allows reading by subscription.\n *\n * @template T\n * @param {T} [value] initial value\n * @param {StartStopNotifier<T>} [start]\n * @returns {Readable<T>}\n */\nexport function readable(value, start) {\n\treturn {\n\t\tsubscribe: writable(value, start).subscribe\n\t};\n}\n\n/**\n * Create a `Writable` store that allows both updating and reading by subscription.\n *\n * @template T\n * @param {T} [value] initial value\n * @param {StartStopNotifier<T>} [start]\n * @returns {Writable<T>}\n */\nexport function writable(value, start = noop) {\n\t/** @type {Unsubscriber | null} */\n\tlet stop = null;\n\n\t/** @type {Set<SubscribeInvalidateTuple<T>>} */\n\tconst subscribers = new Set();\n\n\t/**\n\t * @param {T} new_value\n\t * @returns {void}\n\t */\n\tfunction set(new_value) {\n\t\tif (safe_not_equal(value, new_value)) {\n\t\t\tvalue = new_value;\n\t\t\tif (stop) {\n\t\t\t\t// store is ready\n\t\t\t\tconst run_queue = !subscriber_queue.length;\n\t\t\t\tfor (const subscriber of subscribers) {\n\t\t\t\t\tsubscriber[1]();\n\t\t\t\t\tsubscriber_queue.push(subscriber, value);\n\t\t\t\t}\n\t\t\t\tif (run_queue) {\n\t\t\t\t\tfor (let i = 0; i < subscriber_queue.length; i += 2) {\n\t\t\t\t\t\tsubscriber_queue[i][0](subscriber_queue[i + 1]);\n\t\t\t\t\t}\n\t\t\t\t\tsubscriber_queue.length = 0;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\n\t * @param {Updater<T>} fn\n\t * @returns {void}\n\t */\n\tfunction update(fn) {\n\t\tset(fn(/** @type {T} */ (value)));\n\t}\n\n\t/**\n\t * @param {Subscriber<T>} run\n\t * @param {() => void} [invalidate]\n\t * @returns {Unsubscriber}\n\t */\n\tfunction subscribe(run, invalidate = noop) {\n\t\t/** @type {SubscribeInvalidateTuple<T>} */\n\t\tconst subscriber = [run, invalidate];\n\t\tsubscribers.add(subscriber);\n\t\tif (subscribers.size === 1) {\n\t\t\tstop = start(set, update) || noop;\n\t\t}\n\t\trun(/** @type {T} */ (value));\n\t\treturn () => {\n\t\t\tsubscribers.delete(subscriber);\n\t\t\tif (subscribers.size === 0 && stop) {\n\t\t\t\tstop();\n\t\t\t\tstop = null;\n\t\t\t}\n\t\t};\n\t}\n\treturn { set, update, subscribe };\n}\n\n/**\n * Derived value store by synchronizing one or more readable stores and\n * applying an aggregation function over its input values.\n *\n * @template {Stores} S\n * @template T\n * @overload\n * @param {S} stores\n * @param {(values: StoresValues<S>, set: (value: T) => void, update: (fn: Updater<T>) => void) => Unsubscriber | void} fn\n * @param {T} [initial_value]\n * @returns {Readable<T>}\n */\n/**\n * Derived value store by synchronizing one or more readable stores and\n * applying an aggregation function over its input values.\n *\n * @template {Stores} S\n * @template T\n * @overload\n * @param {S} stores\n * @param {(values: StoresValues<S>) => T} fn\n * @param {T} [initial_value]\n * @returns {Readable<T>}\n */\n/**\n * @template {Stores} S\n * @template T\n * @param {S} stores\n * @param 
{Function} fn\n * @param {T} [initial_value]\n * @returns {Readable<T>}\n */\nexport function derived(stores, fn, initial_value) {\n\tconst single = !Array.isArray(stores);\n\t/** @type {Array<Readable<any>>} */\n\tconst stores_array = single ? [stores] : stores;\n\tif (!stores_array.every(Boolean)) {\n\t\tthrow new Error('derived() expects stores as input, got a falsy value');\n\t}\n\tconst auto = fn.length < 2;\n\treturn readable(initial_value, (set, update) => {\n\t\tlet started = false;\n\t\t/** @type {T[]} */\n\t\tconst values = [];\n\t\tlet pending = 0;\n\t\tlet cleanup = noop;\n\t\tconst sync = () => {\n\t\t\tif (pending) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tcleanup();\n\t\t\tconst result = fn(single ? values[0] : values, set, update);\n\t\t\tif (auto) {\n\t\t\t\tset(result);\n\t\t\t} else {\n\t\t\t\tcleanup = typeof result === 'function' ? result : noop;\n\t\t\t}\n\t\t};\n\t\tconst unsubscribers = stores_array.map((store, i) =>\n\t\t\tsubscribe_to_store(\n\t\t\t\tstore,\n\t\t\t\t(value) => {\n\t\t\t\t\tvalues[i] = value;\n\t\t\t\t\tpending &= ~(1 << i);\n\t\t\t\t\tif (started) {\n\t\t\t\t\t\tsync();\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t() => {\n\t\t\t\t\tpending |= 1 << i;\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t\tstarted = true;\n\t\tsync();\n\t\treturn function stop() {\n\t\t\trun_all(unsubscribers);\n\t\t\tcleanup();\n\t\t\t// We need to set this to false because callbacks can still happen despite having unsubscribed:\n\t\t\t// Callbacks might already be placed in the queue which doesn't know it should no longer\n\t\t\t// invoke this derived store.\n\t\t\tstarted = false;\n\t\t};\n\t});\n}\n\n/**\n * Takes a store and returns a new one derived from the old one that is readable.\n *\n * @template T\n * @param {Readable<T>} store - store to make readonly\n * @returns {Readable<T>}\n */\nexport function readonly(store) {\n\treturn {\n\t\t// @ts-expect-error TODO i suspect the bind is unnecessary\n\t\tsubscribe: store.subscribe.bind(store)\n\t};\n}\n\n/**\n * Get the current value from a store by subscribing and immediately unsubscribing.\n *\n * @template T\n * @param {Readable<T>} store\n * @returns {T}\n */\nexport function get(store) {\n\tlet value;\n\tsubscribe_to_store(store, (_) => (value = _))();\n\t// @ts-expect-error\n\treturn value;\n}\n","/** @import { StoreReferencesContainer } from '#client' */\n/** @import { Store } from '#shared' */\nimport { subscribe_to_store } from '../../../store/utils.js';\nimport { get as get_store } from '../../../store/shared/index.js';\nimport { define_property, noop } from '../../shared/utils.js';\nimport { get } from '../runtime.js';\nimport { teardown } from './effects.js';\nimport { mutable_source, set } from './sources.js';\nimport { DEV } from 'esm-env';\n\n/**\n * We set this to `true` when updating a store so that we correctly\n * schedule effects if the update takes place inside a `$:` effect\n */\nexport let legacy_is_updating_store = false;\n\n/**\n * Whether or not the prop currently being read is a store binding, as in\n * ``. If it is, we treat the prop as mutable even in\n * runes mode, and skip `binding_property_non_reactive` validation\n */\nlet is_store_binding = false;\n\nlet IS_UNMOUNTED = Symbol();\n\n/**\n * Gets the current value of a store. If the store isn't subscribed to yet, it will create a proxy\n * signal that will be updated when the store is. 
The store references container is needed to\n * track reassignments to stores and to track the correct component context.\n * @template V\n * @param {Store | null | undefined} store\n * @param {string} store_name\n * @param {StoreReferencesContainer} stores\n * @returns {V}\n */\nexport function store_get(store, store_name, stores) {\n\tconst entry = (stores[store_name] ??= {\n\t\tstore: null,\n\t\tsource: mutable_source(undefined),\n\t\tunsubscribe: noop\n\t});\n\n\tif (DEV) {\n\t\tentry.source.label = store_name;\n\t}\n\n\t// if the component that setup this is already unmounted we don't want to register a subscription\n\tif (entry.store !== store && !(IS_UNMOUNTED in stores)) {\n\t\tentry.unsubscribe();\n\t\tentry.store = store ?? null;\n\n\t\tif (store == null) {\n\t\t\tentry.source.v = undefined; // see synchronous callback comment below\n\t\t\tentry.unsubscribe = noop;\n\t\t} else {\n\t\t\tvar is_synchronous_callback = true;\n\n\t\t\tentry.unsubscribe = subscribe_to_store(store, (v) => {\n\t\t\t\tif (is_synchronous_callback) {\n\t\t\t\t\t// If the first updates to the store value (possibly multiple of them) are synchronously\n\t\t\t\t\t// inside a derived, we will hit the `state_unsafe_mutation` error if we `set` the value\n\t\t\t\t\tentry.source.v = v;\n\t\t\t\t} else {\n\t\t\t\t\tset(entry.source, v);\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tis_synchronous_callback = false;\n\t\t}\n\t}\n\n\t// if the component that setup this stores is already unmounted the source will be out of sync\n\t// so we just use the `get` for the stores, less performant but it avoids to create a memory leak\n\t// and it will keep the value consistent\n\tif (store && IS_UNMOUNTED in stores) {\n\t\treturn get_store(store);\n\t}\n\n\treturn get(entry.source);\n}\n\n/**\n * Unsubscribe from a store if it's not the same as the one in the store references container.\n * We need this in addition to `store_get` because someone could unsubscribe from a store but\n * then never subscribe to the new one (if any), causing the subscription to stay open wrongfully.\n * @param {Store | null | undefined} store\n * @param {string} store_name\n * @param {StoreReferencesContainer} stores\n */\nexport function store_unsub(store, store_name, stores) {\n\t/** @type {StoreReferencesContainer[''] | undefined} */\n\tlet entry = stores[store_name];\n\n\tif (entry && entry.store !== store) {\n\t\t// Don't reset store yet, so that store_get above can resubscribe to new store if necessary\n\t\tentry.unsubscribe();\n\t\tentry.unsubscribe = noop;\n\t}\n\n\treturn store;\n}\n\n/**\n * Sets the new value of a store and returns that value.\n * @template V\n * @param {Store} store\n * @param {V} value\n * @returns {V}\n */\nexport function store_set(store, value) {\n\tupdate_with_flag(store, value);\n\treturn value;\n}\n\n/**\n * @param {StoreReferencesContainer} stores\n * @param {string} store_name\n */\nexport function invalidate_store(stores, store_name) {\n\tvar entry = stores[store_name];\n\tif (entry.store !== null) {\n\t\tstore_set(entry.store, entry.source.v);\n\t}\n}\n\n/**\n * Unsubscribes from all auto-subscribed stores on destroy\n * @returns {[StoreReferencesContainer, ()=>void]}\n */\nexport function setup_stores() {\n\t/** @type {StoreReferencesContainer} */\n\tconst stores = {};\n\n\tfunction cleanup() {\n\t\tteardown(() => {\n\t\t\tfor (var store_name in stores) {\n\t\t\t\tconst ref = stores[store_name];\n\t\t\t\tref.unsubscribe();\n\t\t\t}\n\t\t\tdefine_property(stores, IS_UNMOUNTED, {\n\t\t\t\tenumerable: false,\n\t\t\t\tvalue: 
true\n\t\t\t});\n\t\t});\n\t}\n\n\treturn [stores, cleanup];\n}\n\n/**\n * @param {Store} store\n * @param {V} value\n * @template V\n */\nfunction update_with_flag(store, value) {\n\tlegacy_is_updating_store = true;\n\n\ttry {\n\t\tstore.set(value);\n\t} finally {\n\t\tlegacy_is_updating_store = false;\n\t}\n}\n\n/**\n * Updates a store with a new value.\n * @param {Store} store the store to update\n * @param {any} expression the expression that mutates the store\n * @param {V} new_value the new store value\n * @template V\n */\nexport function store_mutate(store, expression, new_value) {\n\tupdate_with_flag(store, new_value);\n\treturn expression;\n}\n\n/**\n * @param {Store} store\n * @param {number} store_value\n * @param {1 | -1} [d]\n * @returns {number}\n */\nexport function update_store(store, store_value, d = 1) {\n\tupdate_with_flag(store, store_value + d);\n\treturn store_value;\n}\n\n/**\n * @param {Store} store\n * @param {number} store_value\n * @param {1 | -1} [d]\n * @returns {number}\n */\nexport function update_pre_store(store, store_value, d = 1) {\n\tconst value = store_value + d;\n\tupdate_with_flag(store, value);\n\treturn value;\n}\n\n/**\n * Called inside prop getters to communicate that the prop is a store binding\n */\nexport function mark_store_binding() {\n\tis_store_binding = true;\n}\n\n/**\n * Returns a tuple that indicates whether `fn()` reads a prop that is a store binding.\n * Used to prevent `binding_property_non_reactive` validation false positives and\n * ensure that these props are treated as mutable even in runes mode\n * @template T\n * @param {() => T} fn\n * @returns {[T, boolean]}\n */\nexport function capture_store_binding(fn) {\n\tvar previous_is_store_binding = is_store_binding;\n\n\ttry {\n\t\tis_store_binding = false;\n\t\treturn [fn(), is_store_binding];\n\t} finally {\n\t\tis_store_binding = previous_is_store_binding;\n\t}\n}\n","/** @import { Fork } from 'svelte' */\n/** @import { Derived, Effect, Reaction, Source, Value } from '#client' */\nimport {\n\tBLOCK_EFFECT,\n\tBRANCH_EFFECT,\n\tCLEAN,\n\tDESTROYED,\n\tDIRTY,\n\tEFFECT,\n\tASYNC,\n\tINERT,\n\tRENDER_EFFECT,\n\tROOT_EFFECT,\n\tMAYBE_DIRTY,\n\tDERIVED,\n\tEAGER_EFFECT,\n\tERROR_VALUE,\n\tMANAGED_EFFECT,\n\tREACTION_RAN\n} from '#client/constants';\nimport { async_mode_flag } from '../../flags/index.js';\nimport { deferred, define_property, includes } from '../../shared/utils.js';\nimport {\n\tactive_effect,\n\tactive_reaction,\n\tget,\n\tincrement_write_version,\n\tis_dirty,\n\tupdate_effect\n} from '../runtime.js';\nimport * as e from '../errors.js';\nimport { flush_tasks, queue_micro_task } from '../dom/task.js';\nimport { DEV } from 'esm-env';\nimport { invoke_error_boundary } from '../error-handling.js';\nimport { flush_eager_effects, old_values, set_eager_effects, source, update } from './sources.js';\nimport { eager_effect, unlink_effect } from './effects.js';\nimport { defer_effect } from './utils.js';\nimport { UNINITIALIZED } from '../../../constants.js';\nimport { set_signal_status } from './status.js';\nimport { legacy_is_updating_store } from './store.js';\nimport { invariant } from '../../shared/dev.js';\nimport { log_effect_tree } from '../dev/debug.js';\n\n/** @type {Set} */\nconst batches = new Set();\n\n/** @type {Batch | null} */\nexport let current_batch = null;\n\n/**\n * This is needed to avoid overwriting inputs\n * @type {Batch | null}\n */\nexport let previous_batch = null;\n\n/**\n * When time travelling (i.e. 
working in one batch, while other batches\n * still have ongoing work), we ignore the real values of affected\n * signals in favour of their values within the batch\n * @type {Map | null}\n */\nexport let batch_values = null;\n\n/** @type {Effect | null} */\nlet last_scheduled_effect = null;\n\nexport let is_flushing_sync = false;\nlet is_processing = false;\n\n/**\n * During traversal, this is an array. Newly created effects are (if not immediately\n * executed) pushed to this array, rather than going through the scheduling\n * rigamarole that would cause another turn of the flush loop.\n * @type {Effect[] | null}\n */\nexport let collected_effects = null;\n\n/**\n * An array of effects that are marked during traversal as a result of a `set`\n * (not `internal_set`) call. These will be added to the next batch and\n * trigger another `batch.process()`\n * @type {Effect[] | null}\n * @deprecated when we get rid of legacy mode and stores, we can get rid of this\n */\nexport let legacy_updates = null;\n\nvar flush_count = 0;\nvar source_stacks = DEV ? new Set() : null;\n\nlet uid = 1;\n\nexport class Batch {\n\tid = uid++;\n\n\t/**\n\t * The current values of any signals that are updated in this batch.\n\t * Tuple format: [value, is_derived] (note: is_derived is false for deriveds, too, if they were overridden via assignment)\n\t * They keys of this map are identical to `this.#previous`\n\t * @type {Map}\n\t */\n\tcurrent = new Map();\n\n\t/**\n\t * The values of any signals (sources and deriveds) that are updated in this batch _before_ those updates took place.\n\t * They keys of this map are identical to `this.#current`\n\t * @type {Map}\n\t */\n\tprevious = new Map();\n\n\t/**\n\t * When the batch is committed (and the DOM is updated), we need to remove old branches\n\t * and append new ones by calling the functions added inside (if/each/key/etc) blocks\n\t * @type {Set<(batch: Batch) => void>}\n\t */\n\t#commit_callbacks = new Set();\n\n\t/**\n\t * If a fork is discarded, we need to destroy any effects that are no longer needed\n\t * @type {Set<(batch: Batch) => void>}\n\t */\n\t#discard_callbacks = new Set();\n\n\t/**\n\t * Callbacks that should run only when a fork is committed.\n\t * @type {Set<(batch: Batch) => void>}\n\t */\n\t#fork_commit_callbacks = new Set();\n\n\t/**\n\t * Async effects that are currently in flight\n\t * @type {Map}\n\t */\n\t#pending = new Map();\n\n\t/**\n\t * Async effects that are currently in flight, _not_ inside a pending boundary\n\t * @type {Map}\n\t */\n\t#blocking_pending = new Map();\n\n\t/**\n\t * A deferred that resolves when the batch is committed, used with `settled()`\n\t * TODO replace with Promise.withResolvers once supported widely enough\n\t * @type {{ promise: Promise, resolve: (value?: any) => void, reject: (reason: unknown) => void } | null}\n\t */\n\t#deferred = null;\n\n\t/**\n\t * The root effects that need to be flushed\n\t * @type {Effect[]}\n\t */\n\t#roots = [];\n\n\t/**\n\t * Effects created while this batch was active.\n\t * @type {Effect[]}\n\t */\n\t#new_effects = [];\n\n\t/**\n\t * Deferred effects (which run after async work has completed) that are DIRTY\n\t * @type {Set}\n\t */\n\t#dirty_effects = new Set();\n\n\t/**\n\t * Deferred effects that are MAYBE_DIRTY\n\t * @type {Set}\n\t */\n\t#maybe_dirty_effects = new Set();\n\n\t/**\n\t * A map of branches that still exist, but will be destroyed when this batch\n\t * is committed — we skip over these during `process`.\n\t * The value contains child effects that were 
dirty/maybe_dirty before being reset,\n\t * so they can be rescheduled if the branch survives.\n\t * @type {Map}\n\t */\n\t#skipped_branches = new Map();\n\n\t/**\n\t * Inverse of #skipped_branches which we need to tell prior batches to unskip them when committing\n\t * @type {Set}\n\t */\n\t#unskipped_branches = new Set();\n\n\tis_fork = false;\n\n\t#decrement_queued = false;\n\n\t/** @type {Set} */\n\t#blockers = new Set();\n\n\t#is_deferred() {\n\t\treturn this.is_fork || this.#blocking_pending.size > 0;\n\t}\n\n\t#is_blocked() {\n\t\tfor (const batch of this.#blockers) {\n\t\t\tfor (const effect of batch.#blocking_pending.keys()) {\n\t\t\t\tvar skipped = false;\n\t\t\t\tvar e = effect;\n\n\t\t\t\twhile (e.parent !== null) {\n\t\t\t\t\tif (this.#skipped_branches.has(e)) {\n\t\t\t\t\t\tskipped = true;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\te = e.parent;\n\t\t\t\t}\n\n\t\t\t\tif (!skipped) {\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn false;\n\t}\n\n\t/**\n\t * Add an effect to the #skipped_branches map and reset its children\n\t * @param {Effect} effect\n\t */\n\tskip_effect(effect) {\n\t\tif (!this.#skipped_branches.has(effect)) {\n\t\t\tthis.#skipped_branches.set(effect, { d: [], m: [] });\n\t\t}\n\t\tthis.#unskipped_branches.delete(effect);\n\t}\n\n\t/**\n\t * Remove an effect from the #skipped_branches map and reschedule\n\t * any tracked dirty/maybe_dirty child effects\n\t * @param {Effect} effect\n\t * @param {(e: Effect) => void} callback\n\t */\n\tunskip_effect(effect, callback = (e) => this.schedule(e)) {\n\t\tvar tracked = this.#skipped_branches.get(effect);\n\t\tif (tracked) {\n\t\t\tthis.#skipped_branches.delete(effect);\n\n\t\t\tfor (var e of tracked.d) {\n\t\t\t\tset_signal_status(e, DIRTY);\n\t\t\t\tcallback(e);\n\t\t\t}\n\n\t\t\tfor (e of tracked.m) {\n\t\t\t\tset_signal_status(e, MAYBE_DIRTY);\n\t\t\t\tcallback(e);\n\t\t\t}\n\t\t}\n\t\tthis.#unskipped_branches.add(effect);\n\t}\n\n\t#process() {\n\t\tif (flush_count++ > 1000) {\n\t\t\tbatches.delete(this);\n\t\t\tinfinite_loop_guard();\n\t\t}\n\n\t\t// we only reschedule previously-deferred effects if we expect\n\t\t// to be able to run them after processing the batch\n\t\tif (!this.#is_deferred()) {\n\t\t\tfor (const e of this.#dirty_effects) {\n\t\t\t\tthis.#maybe_dirty_effects.delete(e);\n\t\t\t\tset_signal_status(e, DIRTY);\n\t\t\t\tthis.schedule(e);\n\t\t\t}\n\n\t\t\tfor (const e of this.#maybe_dirty_effects) {\n\t\t\t\tset_signal_status(e, MAYBE_DIRTY);\n\t\t\t\tthis.schedule(e);\n\t\t\t}\n\t\t}\n\n\t\tconst roots = this.#roots;\n\t\tthis.#roots = [];\n\n\t\tthis.apply();\n\n\t\t/** @type {Effect[]} */\n\t\tvar effects = (collected_effects = []);\n\n\t\t/** @type {Effect[]} */\n\t\tvar render_effects = [];\n\n\t\t/**\n\t\t * @type {Effect[]}\n\t\t * @deprecated when we get rid of legacy mode and stores, we can get rid of this\n\t\t */\n\t\tvar updates = (legacy_updates = []);\n\n\t\tfor (const root of roots) {\n\t\t\ttry {\n\t\t\t\tthis.#traverse(root, effects, render_effects);\n\t\t\t} catch (e) {\n\t\t\t\treset_all(root);\n\t\t\t\tthrow e;\n\t\t\t}\n\t\t}\n\n\t\t// any writes should take effect in a subsequent batch\n\t\tcurrent_batch = null;\n\n\t\tif (updates.length > 0) {\n\t\t\tvar batch = Batch.ensure();\n\t\t\tfor (const e of updates) {\n\t\t\t\tbatch.schedule(e);\n\t\t\t}\n\t\t}\n\n\t\tcollected_effects = null;\n\t\tlegacy_updates = null;\n\n\t\tif (this.#is_deferred() || this.#is_blocked()) 
{\n\t\t\tthis.#defer_effects(render_effects);\n\t\t\tthis.#defer_effects(effects);\n\n\t\t\tfor (const [e, t] of this.#skipped_branches) {\n\t\t\t\treset_branch(e, t);\n\t\t\t}\n\t\t} else {\n\t\t\tif (this.#pending.size === 0) {\n\t\t\t\tbatches.delete(this);\n\t\t\t}\n\n\t\t\t// clear effects. Those that are still needed will be rescheduled through unskipping the skipped branches.\n\t\t\tthis.#dirty_effects.clear();\n\t\t\tthis.#maybe_dirty_effects.clear();\n\n\t\t\t// append/remove branches\n\t\t\tfor (const fn of this.#commit_callbacks) fn(this);\n\t\t\tthis.#commit_callbacks.clear();\n\n\t\t\tprevious_batch = this;\n\t\t\tflush_queued_effects(render_effects);\n\t\t\tflush_queued_effects(effects);\n\t\t\tprevious_batch = null;\n\n\t\t\tthis.#deferred?.resolve();\n\t\t}\n\n\t\tvar next_batch = /** @type {Batch | null} */ (/** @type {unknown} */ (current_batch));\n\n\t\t// Edge case: During traversal new branches might create effects that run immediately and set state,\n\t\t// causing an effect and therefore a root to be scheduled again. We need to traverse the current batch\n\t\t// once more in that case - most of the time this will just clean up dirty branches.\n\t\tif (this.#roots.length > 0) {\n\t\t\tconst batch = (next_batch ??= this);\n\t\t\tbatch.#roots.push(...this.#roots.filter((r) => !batch.#roots.includes(r)));\n\t\t}\n\n\t\tif (next_batch !== null) {\n\t\t\tbatches.add(next_batch);\n\n\t\t\tif (DEV) {\n\t\t\t\tfor (const source of this.current.keys()) {\n\t\t\t\t\t/** @type {Set} */ (source_stacks).add(source);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnext_batch.#process();\n\t\t}\n\n\t\t// In sync mode flushSync can cause #commit to wrongfully think that there needs to be a rebase, so we only do it in async mode\n\t\t// TODO fix the underlying cause, otherwise this will likely regress when non-async mode is removed\n\t\tif (async_mode_flag && !batches.has(this)) {\n\t\t\tthis.#commit();\n\t\t}\n\t}\n\n\t/**\n\t * Traverse the effect tree, executing effects or stashing\n\t * them for later execution as appropriate\n\t * @param {Effect} root\n\t * @param {Effect[]} effects\n\t * @param {Effect[]} render_effects\n\t */\n\t#traverse(root, effects, render_effects) {\n\t\troot.f ^= CLEAN;\n\n\t\tvar effect = root.first;\n\n\t\twhile (effect !== null) {\n\t\t\tvar flags = effect.f;\n\t\t\tvar is_branch = (flags & (BRANCH_EFFECT | ROOT_EFFECT)) !== 0;\n\t\t\tvar is_skippable_branch = is_branch && (flags & CLEAN) !== 0;\n\n\t\t\tvar skip = is_skippable_branch || (flags & INERT) !== 0 || this.#skipped_branches.has(effect);\n\n\t\t\tif (!skip && effect.fn !== null) {\n\t\t\t\tif (is_branch) {\n\t\t\t\t\teffect.f ^= CLEAN;\n\t\t\t\t} else if ((flags & EFFECT) !== 0) {\n\t\t\t\t\teffects.push(effect);\n\t\t\t\t} else if (async_mode_flag && (flags & (RENDER_EFFECT | MANAGED_EFFECT)) !== 0) {\n\t\t\t\t\trender_effects.push(effect);\n\t\t\t\t} else if (is_dirty(effect)) {\n\t\t\t\t\tif ((flags & BLOCK_EFFECT) !== 0) this.#maybe_dirty_effects.add(effect);\n\t\t\t\t\tupdate_effect(effect);\n\t\t\t\t}\n\n\t\t\t\tvar child = effect.first;\n\n\t\t\t\tif (child !== null) {\n\t\t\t\t\teffect = child;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\twhile (effect !== null) {\n\t\t\t\tvar next = effect.next;\n\n\t\t\t\tif (next !== null) {\n\t\t\t\t\teffect = next;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\teffect = effect.parent;\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\n\t * @param {Effect[]} effects\n\t */\n\t#defer_effects(effects) {\n\t\tfor (var i = 0; i < effects.length; i += 1) {\n\t\t\tdefer_effect(effects[i], 
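The `#traverse` method above walks the effect tree without recursion, using the `first`/`next`/`parent` pointers that link effects together. A reduced sketch of that traversal shape, assuming nodes with those three fields (`visit` returning `false` models the "skip this branch" cases):

```js
function traverse(root, visit) {
	let effect = root.first;

	while (effect !== null) {
		const descend = visit(effect);

		if (descend && effect.first !== null) {
			effect = effect.first; // go deeper
			continue;
		}

		// subtree done: move to the next sibling, climbing back
		// towards the root until one exists
		while (effect !== null && effect !== root) {
			if (effect.next !== null) {
				effect = effect.next;
				break;
			}
			effect = effect.parent;
		}

		if (effect === root) break;
	}
}
```

This mirrors the two nested `while` loops in `#traverse`: descend via `first`, advance via `next`, retreat via `parent`.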
this.#dirty_effects, this.#maybe_dirty_effects);\n\t\t}\n\t}\n\n\t/**\n\t * Associate a change to a given source with the current\n\t * batch, noting its previous and current values\n\t * @param {Value} source\n\t * @param {any} value\n\t * @param {boolean} [is_derived]\n\t */\n\tcapture(source, value, is_derived = false) {\n\t\tif (source.v !== UNINITIALIZED && !this.previous.has(source)) {\n\t\t\tthis.previous.set(source, source.v);\n\t\t}\n\n\t\t// Don't save errors in `batch_values`, or they won't be thrown in `runtime.js#get`\n\t\tif ((source.f & ERROR_VALUE) === 0) {\n\t\t\tthis.current.set(source, [value, is_derived]);\n\t\t\tbatch_values?.set(source, value);\n\t\t}\n\n\t\tif (!this.is_fork) {\n\t\t\tsource.v = value;\n\t\t}\n\t}\n\n\tactivate() {\n\t\tcurrent_batch = this;\n\t}\n\n\tdeactivate() {\n\t\tcurrent_batch = null;\n\t\tbatch_values = null;\n\t}\n\n\tflush() {\n\t\tvar source_stacks = DEV ? new Set() : null;\n\n\t\ttry {\n\t\t\tis_processing = true;\n\t\t\tcurrent_batch = this;\n\n\t\t\tthis.#process();\n\t\t} finally {\n\t\t\tflush_count = 0;\n\t\t\tlast_scheduled_effect = null;\n\t\t\tcollected_effects = null;\n\t\t\tlegacy_updates = null;\n\t\t\tis_processing = false;\n\n\t\t\tcurrent_batch = null;\n\t\t\tbatch_values = null;\n\n\t\t\told_values.clear();\n\n\t\t\tif (DEV) {\n\t\t\t\tfor (const source of /** @type {Set} */ (source_stacks)) {\n\t\t\t\t\tsource.updated = null;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tdiscard() {\n\t\tfor (const fn of this.#discard_callbacks) fn(this);\n\t\tthis.#discard_callbacks.clear();\n\t\tthis.#fork_commit_callbacks.clear();\n\n\t\tbatches.delete(this);\n\t}\n\n\t/**\n\t * @param {Effect} effect\n\t */\n\tregister_created_effect(effect) {\n\t\tthis.#new_effects.push(effect);\n\t}\n\n\t#commit() {\n\t\t// If there are other pending batches, they now need to be 'rebased' —\n\t\t// in other words, we re-run block/async effects with the newly\n\t\t// committed state, unless the batch in question has a more\n\t\t// recent value for a given source\n\t\tfor (const batch of batches) {\n\t\t\tvar is_earlier = batch.id < this.id;\n\n\t\t\t/** @type {Source[]} */\n\t\t\tvar sources = [];\n\n\t\t\tfor (const [source, [value, is_derived]] of this.current) {\n\t\t\t\tif (batch.current.has(source)) {\n\t\t\t\t\tvar batch_value = /** @type {[any, boolean]} */ (batch.current.get(source))[0]; // faster than destructuring\n\n\t\t\t\t\tif (is_earlier && value !== batch_value) {\n\t\t\t\t\t\t// bring the value up to date\n\t\t\t\t\t\tbatch.current.set(source, [value, is_derived]);\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// same value or later batch has more recent value,\n\t\t\t\t\t\t// no need to re-run these effects\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tsources.push(source);\n\t\t\t}\n\n\t\t\t// Re-run async/block effects that depend on distinct values changed in both batches\n\t\t\tvar others = [...batch.current.keys()].filter((s) => !this.current.has(s));\n\n\t\t\tif (others.length === 0) {\n\t\t\t\tif (is_earlier) {\n\t\t\t\t\t// this batch is now obsolete and can be discarded\n\t\t\t\t\tbatch.discard();\n\t\t\t\t}\n\t\t\t} else if (sources.length > 0) {\n\t\t\t\tif (DEV) {\n\t\t\t\t\tinvariant(batch.#roots.length === 0, 'Batch has scheduled roots');\n\t\t\t\t}\n\n\t\t\t\t// A batch was unskipped in a later batch -> tell prior batches to unskip it, too\n\t\t\t\tif (is_earlier) {\n\t\t\t\t\tfor (const unskipped of this.#unskipped_branches) {\n\t\t\t\t\t\tbatch.unskip_effect(unskipped, (e) => {\n\t\t\t\t\t\t\tif ((e.f & (BLOCK_EFFECT | ASYNC)) 
!== 0) {\n\t\t\t\t\t\t\t\tbatch.schedule(e);\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tbatch.#defer_effects([e]);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tbatch.activate();\n\n\t\t\t\t/** @type {Set} */\n\t\t\t\tvar marked = new Set();\n\n\t\t\t\t/** @type {Map} */\n\t\t\t\tvar checked = new Map();\n\n\t\t\t\tfor (var source of sources) {\n\t\t\t\t\tmark_effects(source, others, marked, checked);\n\t\t\t\t}\n\n\t\t\t\tchecked = new Map();\n\t\t\t\tvar current_unequal = [...batch.current.keys()].filter((c) =>\n\t\t\t\t\tthis.current.has(c) ? /** @type {[any, boolean]} */ (this.current.get(c))[0] !== c : true\n\t\t\t\t);\n\n\t\t\t\tfor (const effect of this.#new_effects) {\n\t\t\t\t\tif (\n\t\t\t\t\t\t(effect.f & (DESTROYED | INERT | EAGER_EFFECT)) === 0 &&\n\t\t\t\t\t\tdepends_on(effect, current_unequal, checked)\n\t\t\t\t\t) {\n\t\t\t\t\t\tif ((effect.f & (ASYNC | BLOCK_EFFECT)) !== 0) {\n\t\t\t\t\t\t\tset_signal_status(effect, DIRTY);\n\t\t\t\t\t\t\tbatch.schedule(effect);\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tbatch.#dirty_effects.add(effect);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Only apply and traverse when we know we triggered async work with marking the effects\n\t\t\t\tif (batch.#roots.length > 0) {\n\t\t\t\t\tbatch.apply();\n\n\t\t\t\t\tfor (var root of batch.#roots) {\n\t\t\t\t\t\tbatch.#traverse(root, [], []);\n\t\t\t\t\t}\n\n\t\t\t\t\tbatch.#roots = [];\n\t\t\t\t}\n\n\t\t\t\tbatch.deactivate();\n\t\t\t}\n\t\t}\n\n\t\tfor (const batch of batches) {\n\t\t\tif (batch.#blockers.has(this)) {\n\t\t\t\tbatch.#blockers.delete(this);\n\n\t\t\t\tif (batch.#blockers.size === 0 && !batch.#is_deferred()) {\n\t\t\t\t\tbatch.activate();\n\t\t\t\t\tbatch.#process();\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\n\t * @param {boolean} blocking\n\t * @param {Effect} effect\n\t */\n\tincrement(blocking, effect) {\n\t\tlet pending_count = this.#pending.get(effect) ?? 0;\n\t\tthis.#pending.set(effect, pending_count + 1);\n\n\t\tif (blocking) {\n\t\t\tlet blocking_pending_count = this.#blocking_pending.get(effect) ?? 0;\n\t\t\tthis.#blocking_pending.set(effect, blocking_pending_count + 1);\n\t\t}\n\t}\n\n\t/**\n\t * @param {boolean} blocking\n\t * @param {Effect} effect\n\t * @param {boolean} skip - whether to skip updates (because this is triggered by a stale reaction)\n\t */\n\tdecrement(blocking, effect, skip) {\n\t\tlet pending_count = this.#pending.get(effect) ?? 0;\n\n\t\tif (pending_count === 1) {\n\t\t\tthis.#pending.delete(effect);\n\t\t} else {\n\t\t\tthis.#pending.set(effect, pending_count - 1);\n\t\t}\n\n\t\tif (blocking) {\n\t\t\tlet blocking_pending_count = this.#blocking_pending.get(effect) ?? 
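The final loop of `#commit` releases batches that were blocked on the committing batch. A simplified model of just that release step (the `pending_batches` set and `process()` method are stand-ins for the real `batches` set and `#process`):

```js
// assumed shape: each batch has { blockers: Set, process(): void }
const pending_batches = new Set();

function on_commit(committed) {
	pending_batches.delete(committed);

	for (const batch of pending_batches) {
		// Set#delete returns true only if `committed` was a blocker
		if (batch.blockers.delete(committed) && batch.blockers.size === 0) {
			batch.process(); // nothing blocks it any more
		}
	}
}
```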
0;\n\n\t\t\tif (blocking_pending_count === 1) {\n\t\t\t\tthis.#blocking_pending.delete(effect);\n\t\t\t} else {\n\t\t\t\tthis.#blocking_pending.set(effect, blocking_pending_count - 1);\n\t\t\t}\n\t\t}\n\n\t\tif (this.#decrement_queued || skip) return;\n\t\tthis.#decrement_queued = true;\n\n\t\tqueue_micro_task(() => {\n\t\t\tthis.#decrement_queued = false;\n\t\t\tthis.flush();\n\t\t});\n\t}\n\n\t/**\n\t * @param {Set} dirty_effects\n\t * @param {Set} maybe_dirty_effects\n\t */\n\ttransfer_effects(dirty_effects, maybe_dirty_effects) {\n\t\tfor (const e of dirty_effects) {\n\t\t\tthis.#dirty_effects.add(e);\n\t\t}\n\n\t\tfor (const e of maybe_dirty_effects) {\n\t\t\tthis.#maybe_dirty_effects.add(e);\n\t\t}\n\n\t\tdirty_effects.clear();\n\t\tmaybe_dirty_effects.clear();\n\t}\n\n\t/** @param {(batch: Batch) => void} fn */\n\toncommit(fn) {\n\t\tthis.#commit_callbacks.add(fn);\n\t}\n\n\t/** @param {(batch: Batch) => void} fn */\n\tondiscard(fn) {\n\t\tthis.#discard_callbacks.add(fn);\n\t}\n\n\t/** @param {(batch: Batch) => void} fn */\n\ton_fork_commit(fn) {\n\t\tthis.#fork_commit_callbacks.add(fn);\n\t}\n\n\trun_fork_commit_callbacks() {\n\t\tfor (const fn of this.#fork_commit_callbacks) fn(this);\n\t\tthis.#fork_commit_callbacks.clear();\n\t}\n\n\tsettled() {\n\t\treturn (this.#deferred ??= deferred()).promise;\n\t}\n\n\tstatic ensure() {\n\t\tif (current_batch === null) {\n\t\t\tconst batch = (current_batch = new Batch());\n\n\t\t\tif (!is_processing) {\n\t\t\t\tbatches.add(current_batch);\n\n\t\t\t\tif (!is_flushing_sync) {\n\t\t\t\t\tqueue_micro_task(() => {\n\t\t\t\t\t\tif (current_batch !== batch) {\n\t\t\t\t\t\t\t// a flushSync happened in the meantime\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tbatch.flush();\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn current_batch;\n\t}\n\n\tapply() {\n\t\tif (!async_mode_flag || (!this.is_fork && batches.size === 1)) {\n\t\t\tbatch_values = null;\n\t\t\treturn;\n\t\t}\n\n\t\t// if there are multiple batches, we are 'time travelling' —\n\t\t// we need to override values with the ones in this batch...\n\t\tbatch_values = new Map();\n\t\tfor (const [source, [value]] of this.current) {\n\t\t\tbatch_values.set(source, value);\n\t\t}\n\n\t\t// ...and undo changes belonging to other batches unless they block this one\n\t\tfor (const batch of batches) {\n\t\t\tif (batch === this || batch.is_fork) continue;\n\n\t\t\t// A batch is blocked on an earlier batch if it overlaps with the earlier batch's changes but is not a superset\n\t\t\tvar intersects = false;\n\t\t\tvar differs = false;\n\n\t\t\tif (batch.id < this.id) {\n\t\t\t\tfor (const [source, [, is_derived]] of batch.current) {\n\t\t\t\t\t// Derived values don't partake in the blocking mechanism, because a derived could\n\t\t\t\t\t// be triggered in one batch already but not the other one yet, causing a false-positive\n\t\t\t\t\tif (is_derived) continue;\n\n\t\t\t\t\tintersects ||= this.current.has(source);\n\t\t\t\t\tdiffers ||= !this.current.has(source);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (intersects && differs) {\n\t\t\t\tthis.#blockers.add(batch);\n\t\t\t} else {\n\t\t\t\tfor (const [source, previous] of batch.previous) {\n\t\t\t\t\tif (!batch_values.has(source)) {\n\t\t\t\t\t\tbatch_values.set(source, previous);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\n\t *\n\t * @param {Effect} effect\n\t */\n\tschedule(effect) {\n\t\tlast_scheduled_effect = effect;\n\n\t\t// defer render effects inside a pending boundary\n\t\t// TODO the `REACTION_RAN` check is only necessary 
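The `#decrement_queued` flag above coalesces any number of same-tick decrements into a single flush. The trick in isolation (names here are illustrative):

```js
let queued = false;

function flush() {
	console.log('flushing once per microtask');
}

function request_flush() {
	if (queued) return; // a flush is already scheduled for this tick
	queued = true;

	queueMicrotask(() => {
		queued = false;
		flush();
	});
}

request_flush();
request_flush();
request_flush(); // logs exactly once
```

Resetting the flag inside the microtask (rather than after `flush()`) means a flush that itself triggers more async completions can schedule a fresh microtask.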
because of legacy `$:` effects AFAICT — we can remove later\n\t\tif (\n\t\t\teffect.b?.is_pending &&\n\t\t\t(effect.f & (EFFECT | RENDER_EFFECT | MANAGED_EFFECT)) !== 0 &&\n\t\t\t(effect.f & REACTION_RAN) === 0\n\t\t) {\n\t\t\teffect.b.defer_effect(effect);\n\t\t\treturn;\n\t\t}\n\n\t\tvar e = effect;\n\n\t\twhile (e.parent !== null) {\n\t\t\te = e.parent;\n\t\t\tvar flags = e.f;\n\n\t\t\t// if the effect is being scheduled because a parent (each/await/etc) block\n\t\t\t// updated an internal source, or because a branch is being unskipped,\n\t\t\t// bail out or we'll cause a second flush\n\t\t\tif (collected_effects !== null && e === active_effect) {\n\t\t\t\tif (async_mode_flag) return;\n\n\t\t\t\t// in sync mode, render effects run during traversal. in an extreme edge case\n\t\t\t\t// — namely that we're setting a value inside a derived read during traversal —\n\t\t\t\t// they can be made dirty after they have already been visited, in which\n\t\t\t\t// case we shouldn't bail out. we also shouldn't bail out if we're\n\t\t\t\t// updating a store inside a `$:`, since this might invalidate\n\t\t\t\t// effects that were already visited\n\t\t\t\tif (\n\t\t\t\t\t(active_reaction === null || (active_reaction.f & DERIVED) === 0) &&\n\t\t\t\t\t!legacy_is_updating_store\n\t\t\t\t) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ((flags & (ROOT_EFFECT | BRANCH_EFFECT)) !== 0) {\n\t\t\t\tif ((flags & CLEAN) === 0) {\n\t\t\t\t\t// branch is already dirty, bail\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\te.f ^= CLEAN;\n\t\t\t}\n\t\t}\n\n\t\tthis.#roots.push(e);\n\t}\n}\n\n// TODO Svelte@6 think about removing the callback argument.\n/**\n * Synchronously flush any pending updates.\n * Returns void if no callback is provided, otherwise returns the result of calling the callback.\n * @template [T=void]\n * @param {(() => T) | undefined} [fn]\n * @returns {T}\n */\nexport function flushSync(fn) {\n\tvar was_flushing_sync = is_flushing_sync;\n\tis_flushing_sync = true;\n\n\ttry {\n\t\tvar result;\n\n\t\tif (fn) {\n\t\t\tif (current_batch !== null && !current_batch.is_fork) {\n\t\t\t\tcurrent_batch.flush();\n\t\t\t}\n\n\t\t\tresult = fn();\n\t\t}\n\n\t\twhile (true) {\n\t\t\tflush_tasks();\n\n\t\t\tif (current_batch === null) {\n\t\t\t\treturn /** @type {T} */ (result);\n\t\t\t}\n\n\t\t\tcurrent_batch.flush();\n\t\t}\n\t} finally {\n\t\tis_flushing_sync = was_flushing_sync;\n\t}\n}\n\nfunction infinite_loop_guard() {\n\tif (DEV) {\n\t\tvar updates = new Map();\n\n\t\tfor (const source of /** @type {Batch} */ (current_batch).current.keys()) {\n\t\t\tfor (const [stack, update] of source.updated ?? 
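From userland, the loop in `flushSync` above is what guarantees the DOM is up to date when the call returns: it drains queued tasks and flushes batches until none remain. A usage sketch (`Counter.svelte` is a hypothetical component; `mount` and `flushSync` are Svelte 5's public entry points):

```js
import { mount, flushSync } from 'svelte';
import Counter from './Counter.svelte'; // hypothetical component

const instance = mount(Counter, { target: document.body });

// state changes made here, plus anything already queued,
// are committed before flushSync returns
flushSync(() => {
	// e.g. some state mutation exposed by the component
});

console.log(document.body.textContent); // reflects the flushed DOM
```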
[]) {\n\t\t\t\tvar entry = updates.get(stack);\n\n\t\t\t\tif (!entry) {\n\t\t\t\t\tentry = { error: update.error, count: 0 };\n\t\t\t\t\tupdates.set(stack, entry);\n\t\t\t\t}\n\n\t\t\t\tentry.count += update.count;\n\t\t\t}\n\t\t}\n\n\t\tfor (const update of updates.values()) {\n\t\t\tif (update.error) {\n\t\t\t\t// eslint-disable-next-line no-console\n\t\t\t\tconsole.error(update.error);\n\t\t\t}\n\t\t}\n\t}\n\n\ttry {\n\t\te.effect_update_depth_exceeded();\n\t} catch (error) {\n\t\tif (DEV) {\n\t\t\t// stack contains no useful information, replace it\n\t\t\tdefine_property(error, 'stack', { value: '' });\n\t\t}\n\n\t\t// Best effort: invoke the boundary nearest the most recent\n\t\t// effect and hope that it's relevant to the infinite loop\n\t\tinvoke_error_boundary(error, last_scheduled_effect);\n\t}\n}\n\n/** @type {Set | null} */\nexport let eager_block_effects = null;\n\n/**\n * @param {Array} effects\n * @returns {void}\n */\nfunction flush_queued_effects(effects) {\n\tvar length = effects.length;\n\tif (length === 0) return;\n\n\tvar i = 0;\n\n\twhile (i < length) {\n\t\tvar effect = effects[i++];\n\n\t\tif ((effect.f & (DESTROYED | INERT)) === 0 && is_dirty(effect)) {\n\t\t\teager_block_effects = new Set();\n\n\t\t\tupdate_effect(effect);\n\n\t\t\t// Effects with no dependencies or teardown do not get added to the effect tree.\n\t\t\t// Deferred effects (e.g. `$effect(...)`) _are_ added to the tree because we\n\t\t\t// don't know if we need to keep them until they are executed. Doing the check\n\t\t\t// here (rather than in `update_effect`) allows us to skip the work for\n\t\t\t// immediate effects.\n\t\t\tif (\n\t\t\t\teffect.deps === null &&\n\t\t\t\teffect.first === null &&\n\t\t\t\teffect.nodes === null &&\n\t\t\t\teffect.teardown === null &&\n\t\t\t\teffect.ac === null\n\t\t\t) {\n\t\t\t\t// remove this effect from the graph\n\t\t\t\tunlink_effect(effect);\n\t\t\t}\n\n\t\t\t// If update_effect() has a flushSync() in it, we may have flushed another flush_queued_effects(),\n\t\t\t// which already handled this logic and did set eager_block_effects to null.\n\t\t\tif (eager_block_effects?.size > 0) {\n\t\t\t\told_values.clear();\n\n\t\t\t\tfor (const e of eager_block_effects) {\n\t\t\t\t\t// Skip eager effects that have already been unmounted\n\t\t\t\t\tif ((e.f & (DESTROYED | INERT)) !== 0) continue;\n\n\t\t\t\t\t// Run effects in order from ancestor to descendant, else we could run into nullpointers\n\t\t\t\t\t/** @type {Effect[]} */\n\t\t\t\t\tconst ordered_effects = [e];\n\t\t\t\t\tlet ancestor = e.parent;\n\t\t\t\t\twhile (ancestor !== null) {\n\t\t\t\t\t\tif (eager_block_effects.has(ancestor)) {\n\t\t\t\t\t\t\teager_block_effects.delete(ancestor);\n\t\t\t\t\t\t\tordered_effects.push(ancestor);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tancestor = ancestor.parent;\n\t\t\t\t\t}\n\n\t\t\t\t\tfor (let j = ordered_effects.length - 1; j >= 0; j--) {\n\t\t\t\t\t\tconst e = ordered_effects[j];\n\t\t\t\t\t\t// Skip eager effects that have already been unmounted\n\t\t\t\t\t\tif ((e.f & (DESTROYED | INERT)) !== 0) continue;\n\t\t\t\t\t\tupdate_effect(e);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\teager_block_effects.clear();\n\t\t\t}\n\t\t}\n\t}\n\n\teager_block_effects = null;\n}\n\n/**\n * This is similar to `mark_reactions`, but it only marks async/block effects\n * depending on `value` and at least one of the other `sources`, so that\n * these effects can re-run after another batch has been committed\n * @param {Value} value\n * @param {Source[]} sources\n * @param {Set} marked\n * @param {Map} 
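The "run effects in order from ancestor to descendant" loop in `flush_queued_effects` above collects waiting ancestors of each eager effect and replays them in reverse. A reduced version of that ordering trick (`run_in_tree_order` is an illustrative name; `effects` is a `Set` of nodes with `parent` pointers):

```js
function run_in_tree_order(effects, run) {
	while (effects.size > 0) {
		const [first] = effects; // take any remaining effect

		// collect it plus any waiting ancestors, closest first
		const chain = [];
		for (let node = first; node !== null; node = node.parent) {
			if (effects.delete(node)) chain.push(node);
		}

		// replay in reverse so ancestors run before descendants
		for (let i = chain.length - 1; i >= 0; i -= 1) run(chain[i]);
	}
}
```

Running a descendant before its ancestor could touch DOM the ancestor is about to replace, which is the null-pointer hazard the original comment warns about.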
checked\n */\nfunction mark_effects(value, sources, marked, checked) {\n\tif (marked.has(value)) return;\n\tmarked.add(value);\n\n\tif (value.reactions !== null) {\n\t\tfor (const reaction of value.reactions) {\n\t\t\tconst flags = reaction.f;\n\n\t\t\tif ((flags & DERIVED) !== 0) {\n\t\t\t\tmark_effects(/** @type {Derived} */ (reaction), sources, marked, checked);\n\t\t\t} else if (\n\t\t\t\t(flags & (ASYNC | BLOCK_EFFECT)) !== 0 &&\n\t\t\t\t(flags & DIRTY) === 0 &&\n\t\t\t\tdepends_on(reaction, sources, checked)\n\t\t\t) {\n\t\t\t\tset_signal_status(reaction, DIRTY);\n\t\t\t\tschedule_effect(/** @type {Effect} */ (reaction));\n\t\t\t}\n\t\t}\n\t}\n}\n\n/**\n * When committing a fork, we need to trigger eager effects so that\n * any `$state.eager(...)` expressions update immediately. This\n * function allows us to discover them\n * @param {Value} value\n * @param {Set} effects\n */\nfunction mark_eager_effects(value, effects) {\n\tif (value.reactions === null) return;\n\n\tfor (const reaction of value.reactions) {\n\t\tconst flags = reaction.f;\n\n\t\tif ((flags & DERIVED) !== 0) {\n\t\t\tmark_eager_effects(/** @type {Derived} */ (reaction), effects);\n\t\t} else if ((flags & EAGER_EFFECT) !== 0) {\n\t\t\tset_signal_status(reaction, DIRTY);\n\t\t\teffects.add(/** @type {Effect} */ (reaction));\n\t\t}\n\t}\n}\n\n/**\n * @param {Reaction} reaction\n * @param {Source[]} sources\n * @param {Map} checked\n */\nfunction depends_on(reaction, sources, checked) {\n\tconst depends = checked.get(reaction);\n\tif (depends !== undefined) return depends;\n\n\tif (reaction.deps !== null) {\n\t\tfor (const dep of reaction.deps) {\n\t\t\tif (includes.call(sources, dep)) {\n\t\t\t\treturn true;\n\t\t\t}\n\n\t\t\tif ((dep.f & DERIVED) !== 0 && depends_on(/** @type {Derived} */ (dep), sources, checked)) {\n\t\t\t\tchecked.set(/** @type {Derived} */ (dep), true);\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t}\n\n\tchecked.set(reaction, false);\n\n\treturn false;\n}\n\n/**\n * @param {Effect} effect\n * @returns {void}\n */\nexport function schedule_effect(effect) {\n\t/** @type {Batch} */ (current_batch).schedule(effect);\n}\n\n/** @type {Source[]} */\nlet eager_versions = [];\n\nfunction eager_flush() {\n\tflushSync(() => {\n\t\tconst eager = eager_versions;\n\t\teager_versions = [];\n\t\tfor (const version of eager) {\n\t\t\tupdate(version);\n\t\t}\n\t});\n}\n\n/**\n * Implementation of `$state.eager(fn())`\n * @template T\n * @param {() => T} fn\n * @returns {T}\n */\nexport function eager(fn) {\n\tvar version = source(0);\n\tvar initial = true;\n\tvar value = /** @type {T} */ (undefined);\n\n\tget(version);\n\n\teager_effect(() => {\n\t\tif (initial) {\n\t\t\t// the first time this runs, we create an eager effect\n\t\t\t// that will run eagerly whenever the expression changes\n\t\t\tvar previous_batch_values = batch_values;\n\n\t\t\ttry {\n\t\t\t\tbatch_values = null;\n\t\t\t\tvalue = fn();\n\t\t\t} finally {\n\t\t\t\tbatch_values = previous_batch_values;\n\t\t\t}\n\n\t\t\treturn;\n\t\t}\n\n\t\t// the second time this effect runs, it's to schedule a\n\t\t// `version` update. since this will recreate the effect,\n\t\t// we don't need to evaluate the expression here\n\t\tif (eager_versions.length === 0) {\n\t\t\tqueue_micro_task(eager_flush);\n\t\t}\n\n\t\teager_versions.push(version);\n\t});\n\n\tinitial = false;\n\n\treturn value;\n}\n\n/**\n * Mark all the effects inside a skipped branch CLEAN, so that\n * they can be correctly rescheduled later. 
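`depends_on` above memoises its answer per reaction in the shared `checked` map, so repeated queries across many effects don't re-walk shared derived chains. A simplified stand-alone version (the `is_derived` field stands in for the real `DERIVED` flag check, and caching is uniform here rather than the original's asymmetric scheme):

```js
function depends_on_any(reaction, sources, checked = new Map()) {
	const cached = checked.get(reaction);
	if (cached !== undefined) return cached;

	let result = false;

	for (const dep of reaction.deps ?? []) {
		// direct dependency, or an indirect one through a derived
		if (sources.includes(dep) || (dep.is_derived && depends_on_any(dep, sources, checked))) {
			result = true;
			break;
		}
	}

	checked.set(reaction, result);
	return result;
}
```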
Tracks dirty and maybe_dirty\n * effects so they can be rescheduled if the branch survives.\n * @param {Effect} effect\n * @param {{ d: Effect[], m: Effect[] }} tracked\n */\nfunction reset_branch(effect, tracked) {\n\t// clean branch = nothing dirty inside, no need to traverse further\n\tif ((effect.f & BRANCH_EFFECT) !== 0 && (effect.f & CLEAN) !== 0) {\n\t\treturn;\n\t}\n\n\tif ((effect.f & DIRTY) !== 0) {\n\t\ttracked.d.push(effect);\n\t} else if ((effect.f & MAYBE_DIRTY) !== 0) {\n\t\ttracked.m.push(effect);\n\t}\n\n\tset_signal_status(effect, CLEAN);\n\n\tvar e = effect.first;\n\twhile (e !== null) {\n\t\treset_branch(e, tracked);\n\t\te = e.next;\n\t}\n}\n\n/**\n * Mark an entire effect tree clean following an error\n * @param {Effect} effect\n */\nfunction reset_all(effect) {\n\tset_signal_status(effect, CLEAN);\n\n\tvar e = effect.first;\n\twhile (e !== null) {\n\t\treset_all(e);\n\t\te = e.next;\n\t}\n}\n\n/**\n * Creates a 'fork', in which state changes are evaluated but not applied to the DOM.\n * This is useful for speculatively loading data (for example) when you suspect that\n * the user is about to take some action.\n *\n * Frameworks like SvelteKit can use this to preload data when the user touches or\n * hovers over a link, making any subsequent navigation feel instantaneous.\n *\n * The `fn` parameter is a synchronous function that modifies some state. The\n * state changes will be reverted after the fork is initialised, then reapplied\n * if and when the fork is eventually committed.\n *\n * When it becomes clear that a fork will _not_ be committed (e.g. because the\n * user navigated elsewhere), it must be discarded to avoid leaking memory.\n *\n * @param {() => void} fn\n * @returns {Fork}\n * @since 5.42\n */\nexport function fork(fn) {\n\tif (!async_mode_flag) {\n\t\te.experimental_async_required('fork');\n\t}\n\n\tif (current_batch !== null) {\n\t\te.fork_timing();\n\t}\n\n\tvar batch = Batch.ensure();\n\tbatch.is_fork = true;\n\tbatch_values = new Map();\n\n\tvar committed = false;\n\tvar settled = batch.settled();\n\n\tflushSync(fn);\n\n\treturn {\n\t\tcommit: async () => {\n\t\t\tif (committed) {\n\t\t\t\tawait settled;\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tif (!batches.has(batch)) {\n\t\t\t\te.fork_discarded();\n\t\t\t}\n\n\t\t\tcommitted = true;\n\n\t\t\tbatch.is_fork = false;\n\n\t\t\t// apply changes and update write versions so deriveds see the change\n\t\t\tfor (var [source, [value]] of batch.current) {\n\t\t\t\tsource.v = value;\n\t\t\t\tsource.wv = increment_write_version();\n\t\t\t}\n\n\t\t\tbatch.activate();\n\t\t\tbatch.run_fork_commit_callbacks();\n\t\t\tbatch.deactivate();\n\n\t\t\t// trigger any `$state.eager(...)` expressions with the new state.\n\t\t\t// eager effects don't get scheduled like other effects, so we\n\t\t\t// can't just encounter them during traversal, we need to\n\t\t\t// proactively flush them\n\t\t\t// TODO maybe there's a better implementation?\n\t\t\tflushSync(() => {\n\t\t\t\t/** @type {Set} */\n\t\t\t\tvar eager_effects = new Set();\n\n\t\t\t\tfor (var source of batch.current.keys()) {\n\t\t\t\t\tmark_eager_effects(source, eager_effects);\n\t\t\t\t}\n\n\t\t\t\tset_eager_effects(eager_effects);\n\t\t\t\tflush_eager_effects();\n\t\t\t});\n\n\t\t\tbatch.flush();\n\t\t\tawait settled;\n\t\t},\n\t\tdiscard: () => {\n\t\t\t// cause any MAYBE_DIRTY deriveds to update\n\t\t\t// if they depend on things thath changed\n\t\t\t// inside the discarded fork\n\t\t\tfor (var source of batch.current.keys()) {\n\t\t\t\tsource.wv = 
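A usage sketch for the experimental `fork` API documented above, along the lines of the link-preloading scenario its JSDoc describes. Assumptions: that `fork` is importable from the main `svelte` entry point, and that `preload` is a hypothetical function that synchronously mutates state for the next page:

```js
import { fork } from 'svelte'; // assumed entry point for the experimental API

const link = document.querySelector('a');

/** @type {{ commit: () => Promise<void>, discard: () => void } | null} */
let speculative = null;

link.addEventListener('pointerenter', () => {
	// state changes run here but are not applied to the DOM
	speculative = fork(() => {
		preload(link.href); // hypothetical: synchronous state writes only
	});
});

link.addEventListener('click', async (event) => {
	event.preventDefault();
	await speculative?.commit(); // reapply the fork and wait for it to settle
	speculative = null;
});

link.addEventListener('pointerleave', () => {
	speculative?.discard(); // required, otherwise the fork leaks
	speculative = null;
});
```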
increment_write_version();\n\t\t\t}\n\n\t\t\tif (!committed && batches.has(batch)) {\n\t\t\t\tbatch.discard();\n\t\t\t}\n\t\t}\n\t};\n}\n\n/**\n * Forcibly remove all current batches, to prevent cross-talk between tests\n */\nexport function clear() {\n\tbatches.clear();\n}\n","import { get, tick, untrack } from '../internal/client/runtime.js';\nimport { effect_tracking, render_effect } from '../internal/client/reactivity/effects.js';\nimport { source, increment } from '../internal/client/reactivity/sources.js';\nimport { tag } from '../internal/client/dev/tracing.js';\nimport { DEV } from 'esm-env';\nimport { queue_micro_task } from '../internal/client/dom/task.js';\n\n/**\n * Returns a `subscribe` function that integrates external event-based systems with Svelte's reactivity.\n * It's particularly useful for integrating with web APIs like `MediaQuery`, `IntersectionObserver`, or `WebSocket`.\n *\n * If `subscribe` is called inside an effect (including indirectly, for example inside a getter),\n * the `start` callback will be called with an `update` function. Whenever `update` is called, the effect re-runs.\n *\n * If `start` returns a cleanup function, it will be called when the effect is destroyed.\n *\n * If `subscribe` is called in multiple effects, `start` will only be called once as long as the effects\n * are active, and the returned teardown function will only be called when all effects are destroyed.\n *\n * It's best understood with an example. Here's an implementation of [`MediaQuery`](https://svelte.dev/docs/svelte/svelte-reactivity#MediaQuery):\n *\n * ```js\n * import { createSubscriber } from 'svelte/reactivity';\n * import { on } from 'svelte/events';\n *\n * export class MediaQuery {\n * \t#query;\n * \t#subscribe;\n *\n * \tconstructor(query) {\n * \t\tthis.#query = window.matchMedia(`(${query})`);\n *\n * \t\tthis.#subscribe = createSubscriber((update) => {\n * \t\t\t// when the `change` event occurs, re-run any effects that read `this.current`\n * \t\t\tconst off = on(this.#query, 'change', update);\n *\n * \t\t\t// stop listening when all the effects are destroyed\n * \t\t\treturn () => off();\n * \t\t});\n * \t}\n *\n * \tget current() {\n * \t\t// This makes the getter reactive, if read in an effect\n * \t\tthis.#subscribe();\n *\n * \t\t// Return the current state of the query, whether or not we're in an effect\n * \t\treturn this.#query.matches;\n * \t}\n * }\n * ```\n * @param {(update: () => void) => (() => void) | void} start\n * @since 5.7.0\n */\nexport function createSubscriber(start) {\n\tlet subscribers = 0;\n\tlet version = source(0);\n\t/** @type {(() => void) | void} */\n\tlet stop;\n\n\tif (DEV) {\n\t\ttag(version, 'createSubscriber version');\n\t}\n\n\treturn () => {\n\t\tif (effect_tracking()) {\n\t\t\tget(version);\n\n\t\t\trender_effect(() => {\n\t\t\t\tif (subscribers === 0) {\n\t\t\t\t\tstop = untrack(() => start(() => increment(version)));\n\t\t\t\t}\n\n\t\t\t\tsubscribers += 1;\n\n\t\t\t\treturn () => {\n\t\t\t\t\tqueue_micro_task(() => {\n\t\t\t\t\t\t// Only count down after a microtask, else we would reach 0 before our own render effect reruns,\n\t\t\t\t\t\t// but reach 1 again when the tick callback of the prior teardown runs. 
That would mean we\n\t\t\t\t\t\t// re-subcribe unnecessarily and create a memory leak because the old subscription is never cleaned up.\n\t\t\t\t\t\tsubscribers -= 1;\n\n\t\t\t\t\t\tif (subscribers === 0) {\n\t\t\t\t\t\t\tstop?.();\n\t\t\t\t\t\t\tstop = undefined;\n\t\t\t\t\t\t\t// Increment the version to ensure any dependent deriveds are marked dirty when the subscription is picked up again later.\n\t\t\t\t\t\t\t// If we didn't do this then the comparison of write versions would determine that the derived has a later version than\n\t\t\t\t\t\t\t// the subscriber, and it would not be re-run.\n\t\t\t\t\t\t\tincrement(version);\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t};\n\t\t\t});\n\t\t}\n\t};\n}\n","/** @import { Effect, Source, TemplateNode, } from '#client' */\nimport {\n\tBOUNDARY_EFFECT,\n\tDIRTY,\n\tEFFECT_PRESERVED,\n\tEFFECT_TRANSPARENT,\n\tMAYBE_DIRTY\n} from '#client/constants';\nimport { HYDRATION_START_ELSE, HYDRATION_START_FAILED } from '../../../../constants.js';\nimport { component_context, set_component_context } from '../../context.js';\nimport { handle_error, invoke_error_boundary } from '../../error-handling.js';\nimport {\n\tblock,\n\tbranch,\n\tdestroy_effect,\n\tmove_effect,\n\tpause_effect\n} from '../../reactivity/effects.js';\nimport {\n\tactive_effect,\n\tactive_reaction,\n\tget,\n\tset_active_effect,\n\tset_active_reaction\n} from '../../runtime.js';\nimport {\n\thydrate_next,\n\thydrate_node,\n\thydrating,\n\tnext,\n\tskip_nodes,\n\tset_hydrate_node\n} from '../hydration.js';\nimport { queue_micro_task } from '../task.js';\nimport * as e from '../../errors.js';\nimport * as w from '../../warnings.js';\nimport { DEV } from 'esm-env';\nimport { Batch, current_batch, previous_batch, schedule_effect } from '../../reactivity/batch.js';\nimport { internal_set, source } from '../../reactivity/sources.js';\nimport { tag } from '../../dev/tracing.js';\nimport { createSubscriber } from '../../../../reactivity/create-subscriber.js';\nimport { create_text } from '../operations.js';\nimport { defer_effect } from '../../reactivity/utils.js';\nimport { set_signal_status } from '../../reactivity/status.js';\n\n/**\n * @typedef {{\n * \t onerror?: (error: unknown, reset: () => void) => void;\n * failed?: (anchor: Node, error: () => unknown, reset: () => () => void) => void;\n * pending?: (anchor: Node) => void;\n * }} BoundaryProps\n */\n\nvar flags = EFFECT_TRANSPARENT | EFFECT_PRESERVED;\n\n/**\n * @param {TemplateNode} node\n * @param {BoundaryProps} props\n * @param {((anchor: Node) => void)} children\n * @param {((error: unknown) => unknown) | undefined} [transform_error]\n * @returns {void}\n */\nexport function boundary(node, props, children, transform_error) {\n\tnew Boundary(node, props, children, transform_error);\n}\n\nexport class Boundary {\n\t/** @type {Boundary | null} */\n\tparent;\n\n\tis_pending = false;\n\n\t/**\n\t * API-level transformError transform function. Transforms errors before they reach the `failed` snippet.\n\t * Inherited from parent boundary, or defaults to identity.\n\t * @type {(error: unknown) => unknown}\n\t */\n\ttransform_error;\n\n\t/** @type {TemplateNode} */\n\t#anchor;\n\n\t/** @type {TemplateNode | null} */\n\t#hydrate_open = hydrating ? 
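The microtask-deferred countdown in `createSubscriber` above generalises to any refcounted resource: deferring the decrement means a destroy-then-recreate within the same tick never drops the count to zero, so the underlying subscription survives. A framework-free sketch (`refcounted` is an illustrative name):

```js
function refcounted(start) {
	let count = 0;
	let stop;

	return function acquire() {
		if (count === 0) stop = start();
		count += 1;

		return function release() {
			// defer, so release-then-acquire in the same tick
			// never tears the subscription down
			queueMicrotask(() => {
				count -= 1;
				if (count === 0) {
					stop?.();
					stop = undefined;
				}
			});
		};
	};
}

// one listener shared by any number of acquirers
const acquire = refcounted(() => {
	const handler = () => console.log('resize');
	window.addEventListener('resize', handler);
	return () => window.removeEventListener('resize', handler);
});

const release = acquire();
release();
acquire(); // same tick: the listener is never detached and re-attached
```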
hydrate_node : null;\n\n\t/** @type {BoundaryProps} */\n\t#props;\n\n\t/** @type {((anchor: Node) => void)} */\n\t#children;\n\n\t/** @type {Effect} */\n\t#effect;\n\n\t/** @type {Effect | null} */\n\t#main_effect = null;\n\n\t/** @type {Effect | null} */\n\t#pending_effect = null;\n\n\t/** @type {Effect | null} */\n\t#failed_effect = null;\n\n\t/** @type {DocumentFragment | null} */\n\t#offscreen_fragment = null;\n\n\t#local_pending_count = 0;\n\t#pending_count = 0;\n\t#pending_count_update_queued = false;\n\n\t/** @type {Set} */\n\t#dirty_effects = new Set();\n\n\t/** @type {Set} */\n\t#maybe_dirty_effects = new Set();\n\n\t/**\n\t * A source containing the number of pending async deriveds/expressions.\n\t * Only created if `$effect.pending()` is used inside the boundary,\n\t * otherwise updating the source results in needless `Batch.ensure()`\n\t * calls followed by no-op flushes\n\t * @type {Source | null}\n\t */\n\t#effect_pending = null;\n\n\t#effect_pending_subscriber = createSubscriber(() => {\n\t\tthis.#effect_pending = source(this.#local_pending_count);\n\n\t\tif (DEV) {\n\t\t\ttag(this.#effect_pending, '$effect.pending()');\n\t\t}\n\n\t\treturn () => {\n\t\t\tthis.#effect_pending = null;\n\t\t};\n\t});\n\n\t/**\n\t * @param {TemplateNode} node\n\t * @param {BoundaryProps} props\n\t * @param {((anchor: Node) => void)} children\n\t * @param {((error: unknown) => unknown) | undefined} [transform_error]\n\t */\n\tconstructor(node, props, children, transform_error) {\n\t\tthis.#anchor = node;\n\t\tthis.#props = props;\n\n\t\tthis.#children = (anchor) => {\n\t\t\tvar effect = /** @type {Effect} */ (active_effect);\n\n\t\t\teffect.b = this;\n\t\t\teffect.f |= BOUNDARY_EFFECT;\n\n\t\t\tchildren(anchor);\n\t\t};\n\n\t\tthis.parent = /** @type {Effect} */ (active_effect).b;\n\n\t\t// Inherit transform_error from parent boundary, or use the provided one, or default to identity\n\t\tthis.transform_error = transform_error ?? this.parent?.transform_error ?? 
((e) => e);\n\n\t\tthis.#effect = block(() => {\n\t\t\tif (hydrating) {\n\t\t\t\tconst comment = /** @type {Comment} */ (this.#hydrate_open);\n\t\t\t\thydrate_next();\n\n\t\t\t\tconst server_rendered_pending = comment.data === HYDRATION_START_ELSE;\n\t\t\t\tconst server_rendered_failed = comment.data.startsWith(HYDRATION_START_FAILED);\n\n\t\t\t\tif (server_rendered_failed) {\n\t\t\t\t\t// Server rendered the failed snippet - hydrate it.\n\t\t\t\t\t// The serialized error is embedded in the comment: \n\t\t\t\t\tconst serialized_error = JSON.parse(comment.data.slice(HYDRATION_START_FAILED.length));\n\t\t\t\t\tthis.#hydrate_failed_content(serialized_error);\n\t\t\t\t} else if (server_rendered_pending) {\n\t\t\t\t\tthis.#hydrate_pending_content();\n\t\t\t\t} else {\n\t\t\t\t\tthis.#hydrate_resolved_content();\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tthis.#render();\n\t\t\t}\n\t\t}, flags);\n\n\t\tif (hydrating) {\n\t\t\tthis.#anchor = hydrate_node;\n\t\t}\n\t}\n\n\t#hydrate_resolved_content() {\n\t\ttry {\n\t\t\tthis.#main_effect = branch(() => this.#children(this.#anchor));\n\t\t} catch (error) {\n\t\t\tthis.error(error);\n\t\t}\n\t}\n\n\t/**\n\t * @param {unknown} error The deserialized error from the server's hydration comment\n\t */\n\t#hydrate_failed_content(error) {\n\t\tconst failed = this.#props.failed;\n\t\tif (!failed) return;\n\n\t\tthis.#failed_effect = branch(() => {\n\t\t\tfailed(\n\t\t\t\tthis.#anchor,\n\t\t\t\t() => error,\n\t\t\t\t() => () => {}\n\t\t\t);\n\t\t});\n\t}\n\n\t#hydrate_pending_content() {\n\t\tconst pending = this.#props.pending;\n\t\tif (!pending) return;\n\n\t\tthis.is_pending = true;\n\t\tthis.#pending_effect = branch(() => pending(this.#anchor));\n\n\t\tqueue_micro_task(() => {\n\t\t\tvar fragment = (this.#offscreen_fragment = document.createDocumentFragment());\n\t\t\tvar anchor = create_text();\n\n\t\t\tfragment.append(anchor);\n\n\t\t\tthis.#main_effect = this.#run(() => {\n\t\t\t\treturn branch(() => this.#children(anchor));\n\t\t\t});\n\n\t\t\tif (this.#pending_count === 0) {\n\t\t\t\tthis.#anchor.before(fragment);\n\t\t\t\tthis.#offscreen_fragment = null;\n\n\t\t\t\tpause_effect(/** @type {Effect} */ (this.#pending_effect), () => {\n\t\t\t\t\tthis.#pending_effect = null;\n\t\t\t\t});\n\n\t\t\t\tthis.#resolve(/** @type {Batch} */ (current_batch));\n\t\t\t}\n\t\t});\n\t}\n\n\t#render() {\n\t\ttry {\n\t\t\tthis.is_pending = this.has_pending_snippet();\n\t\t\tthis.#pending_count = 0;\n\t\t\tthis.#local_pending_count = 0;\n\n\t\t\tthis.#main_effect = branch(() => {\n\t\t\t\tthis.#children(this.#anchor);\n\t\t\t});\n\n\t\t\tif (this.#pending_count > 0) {\n\t\t\t\tvar fragment = (this.#offscreen_fragment = document.createDocumentFragment());\n\t\t\t\tmove_effect(this.#main_effect, fragment);\n\n\t\t\t\tconst pending = /** @type {(anchor: Node) => void} */ (this.#props.pending);\n\t\t\t\tthis.#pending_effect = branch(() => pending(this.#anchor));\n\t\t\t} else {\n\t\t\t\tthis.#resolve(/** @type {Batch} */ (current_batch));\n\t\t\t}\n\t\t} catch (error) {\n\t\t\tthis.error(error);\n\t\t}\n\t}\n\n\t/**\n\t * @param {Batch} batch\n\t */\n\t#resolve(batch) {\n\t\tthis.is_pending = false;\n\n\t\t// any effects that were previously deferred should be transferred\n\t\t// to the batch, which will flush in the next microtask\n\t\tbatch.transfer_effects(this.#dirty_effects, this.#maybe_dirty_effects);\n\t}\n\n\t/**\n\t * Defer an effect inside a pending boundary until the boundary resolves\n\t * @param {Effect} effect\n\t */\n\tdefer_effect(effect) 
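The hydration branch in the constructor above dispatches on the data of a marker comment the server left behind, with the failed branch carrying its serialized error inside the comment itself. A sketch of that scheme with made-up marker strings (Svelte's real prefixes are internal constants, not these):

```js
const PENDING_MARKER = 'svelte-pending'; // illustrative
const FAILED_MARKER = 'svelte-failed:'; // illustrative

function read_boundary_marker(comment) {
	if (comment.data.startsWith(FAILED_MARKER)) {
		// the payload rides along in the comment data
		return {
			branch: 'failed',
			error: JSON.parse(comment.data.slice(FAILED_MARKER.length))
		};
	}

	if (comment.data === PENDING_MARKER) {
		return { branch: 'pending' };
	}

	return { branch: 'resolved' };
}

const comment = document.createComment('svelte-failed:{"message":"oops"}');
console.log(read_boundary_marker(comment));
// { branch: 'failed', error: { message: 'oops' } }
```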
{\n\t\tdefer_effect(effect, this.#dirty_effects, this.#maybe_dirty_effects);\n\t}\n\n\t/**\n\t * Returns `false` if the effect exists inside a boundary whose pending snippet is shown\n\t * @returns {boolean}\n\t */\n\tis_rendered() {\n\t\treturn !this.is_pending && (!this.parent || this.parent.is_rendered());\n\t}\n\n\thas_pending_snippet() {\n\t\treturn !!this.#props.pending;\n\t}\n\n\t/**\n\t * @template T\n\t * @param {() => T} fn\n\t */\n\t#run(fn) {\n\t\tvar previous_effect = active_effect;\n\t\tvar previous_reaction = active_reaction;\n\t\tvar previous_ctx = component_context;\n\n\t\tset_active_effect(this.#effect);\n\t\tset_active_reaction(this.#effect);\n\t\tset_component_context(this.#effect.ctx);\n\n\t\ttry {\n\t\t\tBatch.ensure();\n\t\t\treturn fn();\n\t\t} catch (e) {\n\t\t\thandle_error(e);\n\t\t\treturn null;\n\t\t} finally {\n\t\t\tset_active_effect(previous_effect);\n\t\t\tset_active_reaction(previous_reaction);\n\t\t\tset_component_context(previous_ctx);\n\t\t}\n\t}\n\n\t/**\n\t * Updates the pending count associated with the currently visible pending snippet,\n\t * if any, such that we can replace the snippet with content once work is done\n\t * @param {1 | -1} d\n\t * @param {Batch} batch\n\t */\n\t#update_pending_count(d, batch) {\n\t\tif (!this.has_pending_snippet()) {\n\t\t\tif (this.parent) {\n\t\t\t\tthis.parent.#update_pending_count(d, batch);\n\t\t\t}\n\n\t\t\t// if there's no parent, we're in a scope with no pending snippet\n\t\t\treturn;\n\t\t}\n\n\t\tthis.#pending_count += d;\n\n\t\tif (this.#pending_count === 0) {\n\t\t\tthis.#resolve(batch);\n\n\t\t\tif (this.#pending_effect) {\n\t\t\t\tpause_effect(this.#pending_effect, () => {\n\t\t\t\t\tthis.#pending_effect = null;\n\t\t\t\t});\n\t\t\t}\n\n\t\t\tif (this.#offscreen_fragment) {\n\t\t\t\tthis.#anchor.before(this.#offscreen_fragment);\n\t\t\t\tthis.#offscreen_fragment = null;\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\n\t * Update the source that powers `$effect.pending()` inside this boundary,\n\t * and controls when the current `pending` snippet (if any) is removed.\n\t * Do not call from inside the class\n\t * @param {1 | -1} d\n\t * @param {Batch} batch\n\t */\n\tupdate_pending_count(d, batch) {\n\t\tthis.#update_pending_count(d, batch);\n\n\t\tthis.#local_pending_count += d;\n\n\t\tif (!this.#effect_pending || this.#pending_count_update_queued) return;\n\t\tthis.#pending_count_update_queued = true;\n\n\t\tqueue_micro_task(() => {\n\t\t\tthis.#pending_count_update_queued = false;\n\t\t\tif (this.#effect_pending) {\n\t\t\t\tinternal_set(this.#effect_pending, this.#local_pending_count);\n\t\t\t}\n\t\t});\n\t}\n\n\tget_effect_pending() {\n\t\tthis.#effect_pending_subscriber();\n\t\treturn get(/** @type {Source} */ (this.#effect_pending));\n\t}\n\n\t/** @param {unknown} error */\n\terror(error) {\n\t\t// If we have nothing to capture the error, or if we hit an error while\n\t\t// rendering the fallback, re-throw for another boundary to handle\n\t\tif (!this.#props.onerror && !this.#props.failed) {\n\t\t\tthrow error;\n\t\t}\n\n\t\tif (current_batch?.is_fork) {\n\t\t\tif (this.#main_effect) current_batch.skip_effect(this.#main_effect);\n\t\t\tif (this.#pending_effect) current_batch.skip_effect(this.#pending_effect);\n\t\t\tif (this.#failed_effect) current_batch.skip_effect(this.#failed_effect);\n\n\t\t\tcurrent_batch.on_fork_commit(() => {\n\t\t\t\tthis.#handle_error(error);\n\t\t\t});\n\t\t} else {\n\t\t\tthis.#handle_error(error);\n\t\t}\n\t}\n\n\t/**\n\t * @param {unknown} error\n\t */\n\t#handle_error(error) {\n\t\tif 
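`#update_pending_count` above delegates upwards when a boundary has no pending snippet of its own: the count lives on the nearest ancestor boundary that can actually show one. A flattened model of that walk (field and method names are illustrative):

```js
// assumed shape: { parent, has_pending_snippet, pending_count, resolve() }
function update_pending_count(boundary, delta) {
	// find the nearest boundary that can show a pending snippet
	while (boundary !== null && !boundary.has_pending_snippet) {
		boundary = boundary.parent;
	}

	if (boundary === null) return; // nothing pending-capable above us

	boundary.pending_count += delta;

	if (boundary.pending_count === 0) {
		boundary.resolve(); // swap the pending snippet for real content
	}
}
```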
(this.#main_effect) {\n\t\t\tdestroy_effect(this.#main_effect);\n\t\t\tthis.#main_effect = null;\n\t\t}\n\n\t\tif (this.#pending_effect) {\n\t\t\tdestroy_effect(this.#pending_effect);\n\t\t\tthis.#pending_effect = null;\n\t\t}\n\n\t\tif (this.#failed_effect) {\n\t\t\tdestroy_effect(this.#failed_effect);\n\t\t\tthis.#failed_effect = null;\n\t\t}\n\n\t\tif (hydrating) {\n\t\t\tset_hydrate_node(/** @type {TemplateNode} */ (this.#hydrate_open));\n\t\t\tnext();\n\t\t\tset_hydrate_node(skip_nodes());\n\t\t}\n\n\t\tvar onerror = this.#props.onerror;\n\t\tlet failed = this.#props.failed;\n\t\tvar did_reset = false;\n\t\tvar calling_on_error = false;\n\n\t\tconst reset = () => {\n\t\t\tif (did_reset) {\n\t\t\t\tw.svelte_boundary_reset_noop();\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tdid_reset = true;\n\n\t\t\tif (calling_on_error) {\n\t\t\t\te.svelte_boundary_reset_onerror();\n\t\t\t}\n\n\t\t\tif (this.#failed_effect !== null) {\n\t\t\t\tpause_effect(this.#failed_effect, () => {\n\t\t\t\t\tthis.#failed_effect = null;\n\t\t\t\t});\n\t\t\t}\n\n\t\t\tthis.#run(() => {\n\t\t\t\tthis.#render();\n\t\t\t});\n\t\t};\n\n\t\t/** @param {unknown} transformed_error */\n\t\tconst handle_error_result = (transformed_error) => {\n\t\t\ttry {\n\t\t\t\tcalling_on_error = true;\n\t\t\t\tonerror?.(transformed_error, reset);\n\t\t\t\tcalling_on_error = false;\n\t\t\t} catch (error) {\n\t\t\t\tinvoke_error_boundary(error, this.#effect && this.#effect.parent);\n\t\t\t}\n\n\t\t\tif (failed) {\n\t\t\t\tthis.#failed_effect = this.#run(() => {\n\t\t\t\t\ttry {\n\t\t\t\t\t\treturn branch(() => {\n\t\t\t\t\t\t\t// errors in `failed` snippets cause the boundary to error again\n\t\t\t\t\t\t\t// TODO Svelte 6: revisit this decision, most likely better to go to parent boundary instead\n\t\t\t\t\t\t\tvar effect = /** @type {Effect} */ (active_effect);\n\n\t\t\t\t\t\t\teffect.b = this;\n\t\t\t\t\t\t\teffect.f |= BOUNDARY_EFFECT;\n\n\t\t\t\t\t\t\tfailed(\n\t\t\t\t\t\t\t\tthis.#anchor,\n\t\t\t\t\t\t\t\t() => transformed_error,\n\t\t\t\t\t\t\t\t() => reset\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t});\n\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\tinvoke_error_boundary(error, /** @type {Effect} */ (this.#effect.parent));\n\t\t\t\t\t\treturn null;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t};\n\n\t\tqueue_micro_task(() => {\n\t\t\t// Run the error through the API-level transformError transform (e.g. 
SvelteKit's handleError)\n\t\t\t/** @type {unknown} */\n\t\t\tvar result;\n\t\t\ttry {\n\t\t\t\tresult = this.transform_error(error);\n\t\t\t} catch (e) {\n\t\t\t\tinvoke_error_boundary(e, this.#effect && this.#effect.parent);\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tif (\n\t\t\t\tresult !== null &&\n\t\t\t\ttypeof result === 'object' &&\n\t\t\t\ttypeof (/** @type {any} */ (result).then) === 'function'\n\t\t\t) {\n\t\t\t\t// transformError returned a Promise — wait for it\n\t\t\t\t/** @type {any} */ (result).then(\n\t\t\t\t\thandle_error_result,\n\t\t\t\t\t/** @param {unknown} e */\n\t\t\t\t\t(e) => invoke_error_boundary(e, this.#effect && this.#effect.parent)\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\t// Synchronous result — handle immediately\n\t\t\t\thandle_error_result(result);\n\t\t\t}\n\t\t});\n\t}\n}\n\nexport function pending() {\n\tif (active_effect === null) {\n\t\te.effect_pending_outside_reaction();\n\t}\n\n\tvar boundary = active_effect.b;\n\n\tif (boundary === null) {\n\t\treturn 0; // TODO eventually we will need this to be global\n\t}\n\n\treturn boundary.get_effect_pending();\n}\n","/** @import { Blocker, Effect, Value } from '#client' */\nimport { DESTROYED, STALE_REACTION } from '#client/constants';\nimport { DEV } from 'esm-env';\nimport {\n\tcomponent_context,\n\tdev_stack,\n\tis_runes,\n\tset_component_context,\n\tset_dev_stack\n} from '../context.js';\nimport { Boundary } from '../dom/blocks/boundary.js';\nimport { invoke_error_boundary } from '../error-handling.js';\nimport {\n\tactive_effect,\n\tactive_reaction,\n\tset_active_effect,\n\tset_active_reaction\n} from '../runtime.js';\nimport { Batch, current_batch } from './batch.js';\nimport {\n\tasync_derived,\n\treactivity_loss_tracker,\n\tderived,\n\tderived_safe_equal,\n\tset_reactivity_loss_tracker\n} from './deriveds.js';\nimport { aborted } from './effects.js';\n\n/**\n * @param {Blocker[]} blockers\n * @param {Array<() => any>} sync\n * @param {Array<() => Promise>} async\n * @param {(values: Value[]) => any} fn\n */\nexport function flatten(blockers, sync, async, fn) {\n\tconst d = is_runes() ? derived : derived_safe_equal;\n\n\t// Filter out already-settled blockers - no need to wait for them\n\tvar pending = blockers.filter((b) => !b.settled);\n\n\tif (async.length === 0 && pending.length === 0) {\n\t\tfn(sync.map(d));\n\t\treturn;\n\t}\n\n\tvar parent = /** @type {Effect} */ (active_effect);\n\n\tvar restore = capture();\n\tvar blocker_promise =\n\t\tpending.length === 1\n\t\t\t? pending[0].promise\n\t\t\t: pending.length > 1\n\t\t\t\t? 
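The thenable check above lets `transform_error` return either a plain value or a promise without forcing a microtask in the synchronous case. The dispatch in isolation (`settle` is an illustrative name):

```js
function settle(result, onValue, onError) {
	if (
		result !== null &&
		typeof result === 'object' &&
		typeof result.then === 'function'
	) {
		// thenable: defer to its resolution
		result.then(onValue, onError);
	} else {
		// plain value: handle immediately, no extra microtask
		onValue(result);
	}
}

settle(42, console.log); // logs synchronously
settle(Promise.resolve(42), console.log); // logs on the next microtask
```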
Promise.all(pending.map((b) => b.promise))\n\t\t\t\t: null;\n\n\t/** @param {Value[]} values */\n\tfunction finish(values) {\n\t\trestore();\n\n\t\ttry {\n\t\t\tfn(values);\n\t\t} catch (error) {\n\t\t\tif ((parent.f & DESTROYED) === 0) {\n\t\t\t\tinvoke_error_boundary(error, parent);\n\t\t\t}\n\t\t}\n\n\t\tunset_context();\n\t}\n\n\t// Fast path: blockers but no async expressions\n\tif (async.length === 0) {\n\t\t/** @type {Promise} */ (blocker_promise).then(() => finish(sync.map(d)));\n\t\treturn;\n\t}\n\n\tvar decrement_pending = increment_pending();\n\n\t// Full path: has async expressions\n\tfunction run() {\n\t\tPromise.all(async.map((expression) => async_derived(expression)))\n\t\t\t.then((result) => finish([...sync.map(d), ...result]))\n\t\t\t.catch((error) => invoke_error_boundary(error, parent))\n\t\t\t.finally(() => decrement_pending());\n\t}\n\n\tif (blocker_promise) {\n\t\tblocker_promise.then(() => {\n\t\t\trestore();\n\t\t\trun();\n\t\t\tunset_context();\n\t\t});\n\t} else {\n\t\trun();\n\t}\n}\n\n/**\n * @param {Blocker[]} blockers\n * @param {(values: Value[]) => any} fn\n */\nexport function run_after_blockers(blockers, fn) {\n\tflatten(blockers, [], [], fn);\n}\n\n/**\n * Captures the current effect context so that we can restore it after\n * some asynchronous work has happened (so that e.g. `await a + b`\n * causes `b` to be registered as a dependency).\n */\nexport function capture() {\n\tvar previous_effect = /** @type {Effect} */ (active_effect);\n\tvar previous_reaction = active_reaction;\n\tvar previous_component_context = component_context;\n\tvar previous_batch = /** @type {Batch} */ (current_batch);\n\n\tif (DEV) {\n\t\tvar previous_dev_stack = dev_stack;\n\t}\n\n\treturn function restore(activate_batch = true) {\n\t\tset_active_effect(previous_effect);\n\t\tset_active_reaction(previous_reaction);\n\t\tset_component_context(previous_component_context);\n\n\t\tif (activate_batch && (previous_effect.f & DESTROYED) === 0) {\n\t\t\t// TODO we only need optional chaining here because `{#await ...}` blocks\n\t\t\t// are anomalous. 
Once we retire them we can get rid of it\n\t\t\tprevious_batch?.activate();\n\t\t\tprevious_batch?.apply();\n\t\t}\n\n\t\tif (DEV) {\n\t\t\tset_reactivity_loss_tracker(null);\n\t\t\tset_dev_stack(previous_dev_stack);\n\t\t}\n\t};\n}\n\n/**\n * Wraps an `await` expression in such a way that the effect context that was\n * active before the expression evaluated can be reapplied afterwards —\n * `await a + b` becomes `(await $.save(a))() + b`\n * @template T\n * @param {Promise} promise\n * @returns {Promise<() => T>}\n */\nexport async function save(promise) {\n\tvar restore = capture();\n\tvar value = await promise;\n\n\treturn () => {\n\t\trestore();\n\t\treturn value;\n\t};\n}\n\n/**\n * Reset `current_async_effect` after the `promise` resolves, so\n * that we can emit `await_reactivity_loss` warnings\n * @template T\n * @param {Promise} promise\n * @returns {Promise<() => T>}\n */\nexport async function track_reactivity_loss(promise) {\n\tvar previous_async_effect = reactivity_loss_tracker;\n\t// Ensure that unrelated reads after an async operation is kicked off don't cause false positives\n\tqueueMicrotask(() => {\n\t\tif (reactivity_loss_tracker === previous_async_effect) {\n\t\t\tset_reactivity_loss_tracker(null);\n\t\t}\n\t});\n\n\tvar value = await promise;\n\n\treturn () => {\n\t\tset_reactivity_loss_tracker(previous_async_effect);\n\t\t// While this can result in false negatives it also guards against the more important\n\t\t// false positives that would occur if this is the last in a chain of async operations,\n\t\t// and the reactivity_loss_tracker would then stay around until the next async operation happens.\n\t\tqueueMicrotask(() => {\n\t\t\tif (reactivity_loss_tracker === previous_async_effect) {\n\t\t\t\tset_reactivity_loss_tracker(null);\n\t\t\t}\n\t\t});\n\n\t\treturn value;\n\t};\n}\n\n/**\n * Used in `for await` loops in DEV, so\n * that we can emit `await_reactivity_loss` warnings\n * after each `async_iterator` result resolves and\n * after the `async_iterator` return resolves (if it runs)\n * @template T\n * @template TReturn\n * @param {Iterable | AsyncIterable} iterable\n * @returns {AsyncGenerator}\n */\nexport async function* for_await_track_reactivity_loss(iterable) {\n\t// This is based on the algorithms described in ECMA-262:\n\t// ForIn/OfBodyEvaluation\n\t// https://tc39.es/ecma262/multipage/ecmascript-language-statements-and-declarations.html#sec-runtime-semantics-forin-div-ofbodyevaluation-lhs-stmt-iterator-lhskind-labelset\n\t// AsyncIteratorClose\n\t// https://tc39.es/ecma262/multipage/abstract-operations.html#sec-asynciteratorclose\n\n\t/** @type {AsyncIterator} */\n\t// @ts-ignore\n\tconst iterator = iterable[Symbol.asyncIterator]?.() ?? 
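A worked example of the `save` transform described above, written as a self-contained sketch: `capture` here is a stub that snapshots and restores some ambient context, standing in for the real effect/reaction/context capture:

```js
let context = null; // stand-in for the ambient reactive context

function capture() {
	const previous = context;
	return () => (context = previous);
}

async function save(promise) {
	const restore = capture();
	const value = await promise;

	return () => {
		restore();
		return value;
	};
}

// `await a + b` compiles (conceptually) to:
async function sum(a, b) {
	return (await save(a))() + b; // `b` is read with the context restored
}
```

The returned thunk bundles restoration with the awaited value, which is why the compiled form calls it immediately.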
iterable[Symbol.iterator]?.();\n\n\tif (iterator === undefined) {\n\t\tthrow new TypeError('value is not async iterable');\n\t}\n\n\t/** Whether the completion of the iterator was \"normal\", meaning it wasn't ended via `break` or a similar method */\n\tlet normal_completion = false;\n\ttry {\n\t\twhile (true) {\n\t\t\tconst { done, value } = (await track_reactivity_loss(iterator.next()))();\n\t\t\tif (done) {\n\t\t\t\tnormal_completion = true;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tvar prev = reactivity_loss_tracker;\n\t\t\tyield value;\n\t\t\tset_reactivity_loss_tracker(prev);\n\t\t}\n\t} finally {\n\t\t// If the iterator had an abrupt completion and `return` is defined on the iterator, call it and return the value\n\t\tif (!normal_completion && iterator.return !== undefined) {\n\t\t\t// eslint-disable-next-line no-unsafe-finally\n\t\t\treturn /** @type {TReturn} */ ((await track_reactivity_loss(iterator.return()))().value);\n\t\t}\n\t}\n}\n\nexport function unset_context(deactivate_batch = true) {\n\tset_active_effect(null);\n\tset_active_reaction(null);\n\tset_component_context(null);\n\tif (deactivate_batch) current_batch?.deactivate();\n\n\tif (DEV) {\n\t\tset_reactivity_loss_tracker(null);\n\t\tset_dev_stack(null);\n\t}\n}\n\n/**\n * @param {Array<() => void | Promise>} thunks\n */\nexport function run(thunks) {\n\tconst restore = capture();\n\n\tconst decrement_pending = increment_pending();\n\n\tvar active = /** @type {Effect} */ (active_effect);\n\n\t/** @type {null | { error: any }} */\n\tvar errored = null;\n\n\t/** @param {any} error */\n\tconst handle_error = (error) => {\n\t\terrored = { error }; // wrap in object in case a promise rejects with a falsy value\n\n\t\tif (!aborted(active)) {\n\t\t\tinvoke_error_boundary(error, active);\n\t\t}\n\t};\n\n\tvar promise = Promise.resolve(thunks[0]()).catch(handle_error);\n\n\t/** @type {Blocker} */\n\tvar blocker = { promise, settled: false };\n\tvar blockers = [blocker];\n\n\tpromise.finally(() => {\n\t\tblocker.settled = true;\n\t\tunset_context();\n\t});\n\n\tfor (const fn of thunks.slice(1)) {\n\t\tpromise = promise\n\t\t\t.then(() => {\n\t\t\t\trestore();\n\n\t\t\t\tif (errored) {\n\t\t\t\t\tthrow errored.error;\n\t\t\t\t}\n\n\t\t\t\tif (aborted(active)) {\n\t\t\t\t\tthrow STALE_REACTION;\n\t\t\t\t}\n\n\t\t\t\treturn fn();\n\t\t\t})\n\t\t\t.catch(handle_error);\n\n\t\tconst blocker = { promise, settled: false };\n\t\tblockers.push(blocker);\n\n\t\tpromise.finally(() => {\n\t\t\tblocker.settled = true;\n\t\t\tunset_context();\n\t\t});\n\t}\n\n\tpromise\n\t\t// wait one more tick, so that template effects are\n\t\t// guaranteed to run before `$effect(...)`\n\t\t.then(() => Promise.resolve())\n\t\t.finally(() => decrement_pending());\n\n\treturn blockers;\n}\n\n/**\n * @param {Blocker[]} blockers\n */\nexport function wait(blockers) {\n\treturn Promise.all(blockers.map((b) => b.promise));\n}\n\n/**\n * @returns {(skip?: boolean) => void}\n */\nexport function increment_pending() {\n\tvar effect = /** @type {Effect} */ (active_effect);\n\tvar boundary = /** @type {Boundary} */ (effect.b);\n\tvar batch = /** @type {Batch} */ (current_batch);\n\tvar blocking = boundary.is_rendered();\n\n\tboundary.update_pending_count(1, batch);\n\tbatch.increment(blocking, effect);\n\n\treturn (skip = false) => {\n\t\tboundary.update_pending_count(-1, batch);\n\t\tbatch.decrement(blocking, effect, skip);\n\t};\n}\n","/** @import { Derived, Effect, Reaction, Source, Value } from '#client' */\n/** @import { Batch } from './batch.js'; */\n/** @import { 
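`run` above chains its thunks sequentially, giving each step its own `{ promise, settled }` blocker record and funnelling all errors to one handler. A stripped-down version of just that chaining (the `chain` helper is illustrative):

```js
function chain(thunks, on_error) {
	let promise = Promise.resolve().then(thunks[0]).catch(on_error);

	const first = { promise, settled: false };
	const blockers = [first];
	promise.finally(() => (first.settled = true));

	for (const fn of thunks.slice(1)) {
		// each thunk waits for the previous promise to settle
		promise = promise.then(fn).catch(on_error);

		const blocker = { promise, settled: false };
		blockers.push(blocker);
		promise.finally(() => (blocker.settled = true));
	}

	return blockers;
}

const blockers = chain(
	[() => 1, () => new Promise((r) => setTimeout(r, 10)), () => 3],
	console.error
);

Promise.all(blockers.map((b) => b.promise)).then(() => console.log('all settled'));
```

The `settled` flag is what lets `flatten` skip blockers that have already finished instead of waiting on them again.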
Boundary } from '../dom/blocks/boundary.js'; */\nimport { DEV } from 'esm-env';\nimport {\n\tERROR_VALUE,\n\tDERIVED,\n\tDIRTY,\n\tEFFECT_PRESERVED,\n\tSTALE_REACTION,\n\tASYNC,\n\tWAS_MARKED,\n\tDESTROYED,\n\tCLEAN,\n\tREACTION_RAN,\n\tINERT\n} from '#client/constants';\nimport {\n\tactive_reaction,\n\tactive_effect,\n\tupdate_reaction,\n\tincrement_write_version,\n\tset_active_effect,\n\tpush_reaction_value,\n\tis_destroying_effect,\n\tupdate_effect,\n\tremove_reactions,\n\tskipped_deps,\n\tnew_deps\n} from '../runtime.js';\nimport { equals, safe_equals } from './equality.js';\nimport * as e from '../errors.js';\nimport * as w from '../warnings.js';\nimport {\n\tasync_effect,\n\tdestroy_effect,\n\tdestroy_effect_children,\n\teffect_tracking,\n\tteardown\n} from './effects.js';\nimport { eager_effects, internal_set, set_eager_effects, source } from './sources.js';\nimport { get_error } from '../../shared/dev.js';\nimport { async_mode_flag, tracing_mode_flag } from '../../flags/index.js';\nimport { component_context } from '../context.js';\nimport { UNINITIALIZED } from '../../../constants.js';\nimport { batch_values, current_batch } from './batch.js';\nimport { increment_pending, unset_context } from './async.js';\nimport { deferred, includes, noop } from '../../shared/utils.js';\nimport { set_signal_status, update_derived_status } from './status.js';\n\n/**\n * This allows us to track 'reactivity loss' that occurs when signals\n * are read after a non-context-restoring `await`. Dev-only\n * @type {{ effect: Effect, effect_deps: Set, warned: boolean } | null}\n */\nexport let reactivity_loss_tracker = null;\n\n/** @param {{ effect: Effect, effect_deps: Set, warned: boolean } | null} v */\nexport function set_reactivity_loss_tracker(v) {\n\treactivity_loss_tracker = v;\n}\n\nexport const recent_async_deriveds = new Set();\n\n/**\n * @template V\n * @param {() => V} fn\n * @returns {Derived}\n */\n/*#__NO_SIDE_EFFECTS__*/\nexport function derived(fn) {\n\tvar flags = DERIVED | DIRTY;\n\n\tif (active_effect !== null) {\n\t\t// Since deriveds are evaluated lazily, any effects created inside them are\n\t\t// created too late to ensure that the parent effect is added to the tree\n\t\tactive_effect.f |= EFFECT_PRESERVED;\n\t}\n\n\t/** @type {Derived} */\n\tconst signal = {\n\t\tctx: component_context,\n\t\tdeps: null,\n\t\teffects: null,\n\t\tequals,\n\t\tf: flags,\n\t\tfn,\n\t\treactions: null,\n\t\trv: 0,\n\t\tv: /** @type {V} */ (UNINITIALIZED),\n\t\twv: 0,\n\t\tparent: active_effect,\n\t\tac: null\n\t};\n\n\tif (DEV && tracing_mode_flag) {\n\t\tsignal.created = get_error('created at');\n\t}\n\n\treturn signal;\n}\n\n/**\n * @template V\n * @param {() => V | Promise} fn\n * @param {string} [label]\n * @param {string} [location] If provided, print a warning if the value is not read immediately after update\n * @returns {Promise>}\n */\n/*#__NO_SIDE_EFFECTS__*/\nexport function async_derived(fn, label, location) {\n\tlet parent = /** @type {Effect | null} */ (active_effect);\n\n\tif (parent === null) {\n\t\te.async_derived_orphan();\n\t}\n\n\tvar promise = /** @type {Promise} */ (/** @type {unknown} */ (undefined));\n\tvar signal = source(/** @type {V} */ (UNINITIALIZED));\n\n\tif (DEV) signal.label = label;\n\n\t// only suspend in async deriveds created on initialisation\n\tvar should_suspend = !active_reaction;\n\n\t/** @type {Map>>} */\n\tvar deferreds = new Map();\n\n\tasync_effect(() => {\n\t\tvar effect = /** @type {Effect} */ (active_effect);\n\n\t\tif (DEV) 
{\n\t\t\treactivity_loss_tracker = { effect, effect_deps: new Set(), warned: false };\n\t\t}\n\n\t\t/** @type {ReturnType>} */\n\t\tvar d = deferred();\n\t\tpromise = d.promise;\n\n\t\ttry {\n\t\t\t// If this code is changed at some point, make sure to still access the then property\n\t\t\t// of fn() to read any signals it might access, so that we track them as dependencies.\n\t\t\t// We call `unset_context` to undo any `save` calls that happen inside `fn()`\n\t\t\tPromise.resolve(fn()).then(d.resolve, d.reject).finally(unset_context);\n\t\t} catch (error) {\n\t\t\td.reject(error);\n\t\t\tunset_context();\n\t\t}\n\n\t\tif (DEV) {\n\t\t\tif (reactivity_loss_tracker) {\n\t\t\t\t// Reused deps from previous run (indices 0 to skipped_deps-1)\n\t\t\t\t// We deliberately only track direct dependencies of the async expression to encourage\n\t\t\t\t// dependencies being directly visible at the point of the expression\n\t\t\t\tif (effect.deps !== null) {\n\t\t\t\t\tfor (let i = 0; i < skipped_deps; i += 1) {\n\t\t\t\t\t\treactivity_loss_tracker.effect_deps.add(effect.deps[i]);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// New deps discovered this run\n\t\t\t\tif (new_deps !== null) {\n\t\t\t\t\tfor (let i = 0; i < new_deps.length; i += 1) {\n\t\t\t\t\t\treactivity_loss_tracker.effect_deps.add(new_deps[i]);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treactivity_loss_tracker = null;\n\t\t}\n\n\t\tvar batch = /** @type {Batch} */ (current_batch);\n\n\t\tif (should_suspend) {\n\t\t\t// we only increment the batch's pending state for updates, not creation, otherwise\n\t\t\t// we will decrement to zero before the work that depends on this promise (e.g. a\n\t\t\t// template effect) has initialized, causing the batch to resolve prematurely\n\t\t\tif ((effect.f & REACTION_RAN) !== 0) {\n\t\t\t\tvar decrement_pending = increment_pending();\n\t\t\t}\n\n\t\t\tif (/** @type {Boundary} */ (parent.b).is_rendered()) {\n\t\t\t\tdeferreds.get(batch)?.reject(STALE_REACTION);\n\t\t\t\tdeferreds.delete(batch); // delete to ensure correct order in Map iteration below\n\t\t\t} else {\n\t\t\t\t// While the boundary is still showing pending, a new run supersedes all older in-flight runs\n\t\t\t\t// for this async expression. 
Cancel eagerly so resolution cannot commit stale values.\n\t\t\t\tfor (const d of deferreds.values()) {\n\t\t\t\t\td.reject(STALE_REACTION);\n\t\t\t\t}\n\t\t\t\tdeferreds.clear();\n\t\t\t}\n\n\t\t\tdeferreds.set(batch, d);\n\t\t}\n\n\t\t/**\n\t\t * @param {any} value\n\t\t * @param {unknown} error\n\t\t */\n\t\tconst handler = (value, error = undefined) => {\n\t\t\tif (DEV) {\n\t\t\t\treactivity_loss_tracker = null;\n\t\t\t}\n\n\t\t\tif (decrement_pending) {\n\t\t\t\t// don't trigger an update if we're only here because\n\t\t\t\t// the promise was superseded before it could resolve\n\t\t\t\tvar skip = error === STALE_REACTION;\n\t\t\t\tdecrement_pending(skip);\n\t\t\t}\n\n\t\t\tif (error === STALE_REACTION || (effect.f & DESTROYED) !== 0) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tbatch.activate();\n\n\t\t\tif (error) {\n\t\t\t\tsignal.f |= ERROR_VALUE;\n\n\t\t\t\t// @ts-expect-error the error is the wrong type, but we don't care\n\t\t\t\tinternal_set(signal, error);\n\t\t\t} else {\n\t\t\t\tif ((signal.f & ERROR_VALUE) !== 0) {\n\t\t\t\t\tsignal.f ^= ERROR_VALUE;\n\t\t\t\t}\n\n\t\t\t\tinternal_set(signal, value);\n\n\t\t\t\t// All prior async derived runs are now stale\n\t\t\t\tfor (const [b, d] of deferreds) {\n\t\t\t\t\tdeferreds.delete(b);\n\t\t\t\t\tif (b === batch) break;\n\t\t\t\t\td.reject(STALE_REACTION);\n\t\t\t\t}\n\n\t\t\t\tif (DEV && location !== undefined) {\n\t\t\t\t\trecent_async_deriveds.add(signal);\n\n\t\t\t\t\tsetTimeout(() => {\n\t\t\t\t\t\tif (recent_async_deriveds.has(signal)) {\n\t\t\t\t\t\t\tw.await_waterfall(/** @type {string} */ (signal.label), location);\n\t\t\t\t\t\t\trecent_async_deriveds.delete(signal);\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbatch.deactivate();\n\t\t};\n\n\t\td.promise.then(handler, (e) => handler(null, e || 'unknown'));\n\t});\n\n\tteardown(() => {\n\t\tfor (const d of deferreds.values()) {\n\t\t\td.reject(STALE_REACTION);\n\t\t}\n\t});\n\n\tif (DEV) {\n\t\t// add a flag that lets this be printed as a derived\n\t\t// when using `$inspect.trace()`\n\t\tsignal.f |= ASYNC;\n\t}\n\n\treturn new Promise((fulfil) => {\n\t\t/** @param {Promise} p */\n\t\tfunction next(p) {\n\t\t\tfunction go() {\n\t\t\t\tif (p === promise) {\n\t\t\t\t\tfulfil(signal);\n\t\t\t\t} else {\n\t\t\t\t\t// if the effect re-runs before the initial promise\n\t\t\t\t\t// resolves, delay resolution until we have a value\n\t\t\t\t\tnext(promise);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tp.then(go, go);\n\t\t}\n\n\t\tnext(promise);\n\t});\n}\n\n/**\n * @template V\n * @param {() => V} fn\n * @returns {Derived}\n */\n/*#__NO_SIDE_EFFECTS__*/\nexport function user_derived(fn) {\n\tconst d = derived(fn);\n\n\tif (!async_mode_flag) push_reaction_value(d);\n\n\treturn d;\n}\n\n/**\n * @template V\n * @param {() => V} fn\n * @returns {Derived}\n */\n/*#__NO_SIDE_EFFECTS__*/\nexport function derived_safe_equal(fn) {\n\tconst signal = derived(fn);\n\tsignal.equals = safe_equals;\n\treturn signal;\n}\n\n/**\n * @param {Derived} derived\n * @returns {void}\n */\nexport function destroy_derived_effects(derived) {\n\tvar effects = derived.effects;\n\n\tif (effects !== null) {\n\t\tderived.effects = null;\n\n\t\tfor (var i = 0; i < effects.length; i += 1) {\n\t\t\tdestroy_effect(/** @type {Effect} */ (effects[i]));\n\t\t}\n\t}\n}\n\n/**\n * The currently updating deriveds, used to detect infinite recursion\n * in dev mode and provide a nicer error than 'too much recursion'\n * @type {Derived[]}\n */\nlet stack = [];\n\n/**\n * @template T\n * @param {Derived} derived\n * @returns {T}\n 
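// Editor's note — illustrative sketch, not part of the Svelte source. The
// `deferreds` map above implements "latest run wins": while the boundary is
// pending, starting a new run rejects every older in-flight promise with a
// STALE_REACTION sentinel, so a slow old promise can never commit a stale
// value. The same idea in miniature (all names hypothetical):
const STALE = Symbol('stale');
const inflight = new Set();

function run_latest(fn) {
	for (const d of inflight) d.reject(STALE); // supersede older runs
	inflight.clear();

	let reject_gate;
	const gate = new Promise((_, reject) => (reject_gate = reject));
	const entry = { reject: reject_gate };
	inflight.add(entry);

	// whichever settles first wins; a superseded run rejects with STALE
	return Promise.race([fn(), gate]).finally(() => inflight.delete(entry));
}

// usage: run_latest(fetch_value).then(commit, (e) => { if (e !== STALE) throw e; });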
*/\nexport function execute_derived(derived) {\n\tvar value;\n\tvar prev_active_effect = active_effect;\n\tvar parent = derived.parent;\n\n\tif (!is_destroying_effect && parent !== null && (parent.f & (DESTROYED | INERT)) !== 0) {\n\t\tw.derived_inert();\n\n\t\treturn derived.v;\n\t}\n\n\tset_active_effect(parent);\n\n\tif (DEV) {\n\t\tlet prev_eager_effects = eager_effects;\n\t\tset_eager_effects(new Set());\n\t\ttry {\n\t\t\tif (includes.call(stack, derived)) {\n\t\t\t\te.derived_references_self();\n\t\t\t}\n\n\t\t\tstack.push(derived);\n\n\t\t\tderived.f &= ~WAS_MARKED;\n\t\t\tdestroy_derived_effects(derived);\n\t\t\tvalue = update_reaction(derived);\n\t\t} finally {\n\t\t\tset_active_effect(prev_active_effect);\n\t\t\tset_eager_effects(prev_eager_effects);\n\t\t\tstack.pop();\n\t\t}\n\t} else {\n\t\ttry {\n\t\t\tderived.f &= ~WAS_MARKED;\n\t\t\tdestroy_derived_effects(derived);\n\t\t\tvalue = update_reaction(derived);\n\t\t} finally {\n\t\t\tset_active_effect(prev_active_effect);\n\t\t}\n\t}\n\n\treturn value;\n}\n\n/**\n * @param {Derived} derived\n * @returns {void}\n */\nexport function update_derived(derived) {\n\tvar value = execute_derived(derived);\n\n\tif (!derived.equals(value)) {\n\t\tderived.wv = increment_write_version();\n\n\t\t// in a fork, we don't update the underlying value, just `batch_values`.\n\t\t// the underlying value will be updated when the fork is committed.\n\t\t// otherwise, the next time we get here after a 'real world' state\n\t\t// change, `derived.equals` may incorrectly return `true`\n\t\tif (!current_batch?.is_fork || derived.deps === null) {\n\t\t\tif (current_batch !== null) {\n\t\t\t\tcurrent_batch.capture(derived, value, true);\n\t\t\t} else {\n\t\t\t\tderived.v = value;\n\t\t\t}\n\n\t\t\t// deriveds without dependencies should never be recomputed\n\t\t\tif (derived.deps === null) {\n\t\t\t\tset_signal_status(derived, CLEAN);\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t}\n\n\t// don't mark derived clean if we're reading it inside a\n\t// cleanup function, or it will cache a stale value\n\tif (is_destroying_effect) {\n\t\treturn;\n\t}\n\n\t// During time traveling we don't want to reset the status so that\n\t// traversal of the graph in the other batches still happens\n\tif (batch_values !== null) {\n\t\t// only cache the value if we're in a tracking context, otherwise we won't\n\t\t// clear the cache in `mark_reactions` when dependencies are updated\n\t\tif (effect_tracking() || current_batch?.is_fork) {\n\t\t\tbatch_values.set(derived, value);\n\t\t}\n\t} else {\n\t\tupdate_derived_status(derived);\n\t}\n}\n\n/**\n * @param {Derived} derived\n */\nexport function freeze_derived_effects(derived) {\n\tif (derived.effects === null) return;\n\n\tfor (const e of derived.effects) {\n\t\t// if the effect has a teardown function or abort signal, call it\n\t\tif (e.teardown || e.ac) {\n\t\t\te.teardown?.();\n\t\t\te.ac?.abort(STALE_REACTION);\n\n\t\t\t// make it a noop so it doesn't get called again if the derived\n\t\t\t// is unfrozen. 
we don't set it to `null`, because the existence\n\t\t\t// of a teardown function is what determines whether the\n\t\t\t// effect runs again during unfreezing\n\t\t\te.teardown = noop;\n\t\t\te.ac = null;\n\n\t\t\tremove_reactions(e, 0);\n\t\t\tdestroy_effect_children(e);\n\t\t}\n\t}\n}\n\n/**\n * @param {Derived} derived\n */\nexport function unfreeze_derived_effects(derived) {\n\tif (derived.effects === null) return;\n\n\tfor (const e of derived.effects) {\n\t\t// if the effect was previously frozen — indicated by the presence\n\t\t// of a teardown function — unfreeze it\n\t\tif (e.teardown) {\n\t\t\tupdate_effect(e);\n\t\t}\n\t}\n}\n","/** @import { Derived, Effect, Source, Value } from '#client' */\nimport { DEV } from 'esm-env';\nimport {\n\tactive_reaction,\n\tactive_effect,\n\tuntracked_writes,\n\tget,\n\tset_untracked_writes,\n\tuntrack,\n\tincrement_write_version,\n\tupdate_effect,\n\tcurrent_sources,\n\tis_dirty,\n\tuntracking,\n\tis_destroying_effect,\n\tpush_reaction_value\n} from '../runtime.js';\nimport { equals, safe_equals } from './equality.js';\nimport {\n\tCLEAN,\n\tDERIVED,\n\tDIRTY,\n\tBRANCH_EFFECT,\n\tEAGER_EFFECT,\n\tMAYBE_DIRTY,\n\tBLOCK_EFFECT,\n\tROOT_EFFECT,\n\tASYNC,\n\tWAS_MARKED,\n\tCONNECTED,\n\tREACTION_IS_UPDATING\n} from '#client/constants';\nimport * as e from '../errors.js';\nimport { legacy_mode_flag, tracing_mode_flag } from '../../flags/index.js';\nimport { includes } from '../../shared/utils.js';\nimport { tag_proxy } from '../dev/tracing.js';\nimport { get_error } from '../../shared/dev.js';\nimport { component_context, is_runes } from '../context.js';\nimport {\n\tBatch,\n\tbatch_values,\n\teager_block_effects,\n\tschedule_effect,\n\tlegacy_updates\n} from './batch.js';\nimport { proxy } from '../proxy.js';\nimport { execute_derived } from './deriveds.js';\nimport { set_signal_status, update_derived_status } from './status.js';\n\n/** @type {Set} */\nexport let eager_effects = new Set();\n\n/** @type {Map} */\nexport const old_values = new Map();\n\n/**\n * @param {Set} v\n */\nexport function set_eager_effects(v) {\n\teager_effects = v;\n}\n\nlet eager_effects_deferred = false;\n\nexport function set_eager_effects_deferred() {\n\teager_effects_deferred = true;\n}\n\n/**\n * @template V\n * @param {V} v\n * @param {Error | null} [stack]\n * @returns {Source}\n */\n// TODO rename this to `state` throughout the codebase\nexport function source(v, stack) {\n\t/** @type {Value} */\n\tvar signal = {\n\t\tf: 0, // TODO ideally we could skip this altogether, but it causes type errors\n\t\tv,\n\t\treactions: null,\n\t\tequals,\n\t\trv: 0,\n\t\twv: 0\n\t};\n\n\tif (DEV && tracing_mode_flag) {\n\t\tsignal.created = stack ?? 
get_error('created at');\n\t\tsignal.updated = null;\n\t\tsignal.set_during_effect = false;\n\t\tsignal.trace = null;\n\t}\n\n\treturn signal;\n}\n\n/**\n * @template V\n * @param {V} v\n * @param {Error | null} [stack]\n */\n/*#__NO_SIDE_EFFECTS__*/\nexport function state(v, stack) {\n\tconst s = source(v, stack);\n\n\tpush_reaction_value(s);\n\n\treturn s;\n}\n\n/**\n * @template V\n * @param {V} initial_value\n * @param {boolean} [immutable]\n * @returns {Source}\n */\n/*#__NO_SIDE_EFFECTS__*/\nexport function mutable_source(initial_value, immutable = false, trackable = true) {\n\tconst s = source(initial_value);\n\tif (!immutable) {\n\t\ts.equals = safe_equals;\n\t}\n\n\t// bind the signal to the component context, in case we need to\n\t// track updates to trigger beforeUpdate/afterUpdate callbacks\n\tif (legacy_mode_flag && trackable && component_context !== null && component_context.l !== null) {\n\t\t(component_context.l.s ??= []).push(s);\n\t}\n\n\treturn s;\n}\n\n/**\n * @template V\n * @param {Value} source\n * @param {V} value\n */\nexport function mutate(source, value) {\n\tset(\n\t\tsource,\n\t\tuntrack(() => get(source))\n\t);\n\treturn value;\n}\n\n/**\n * @template V\n * @param {Source} source\n * @param {V} value\n * @param {boolean} [should_proxy]\n * @returns {V}\n */\nexport function set(source, value, should_proxy = false) {\n\tif (\n\t\tactive_reaction !== null &&\n\t\t// since we are untracking the function inside `$inspect.with` we need to add this check\n\t\t// to ensure we error if state is set inside an inspect effect\n\t\t(!untracking || (active_reaction.f & EAGER_EFFECT) !== 0) &&\n\t\tis_runes() &&\n\t\t(active_reaction.f & (DERIVED | BLOCK_EFFECT | ASYNC | EAGER_EFFECT)) !== 0 &&\n\t\t(current_sources === null || !includes.call(current_sources, source))\n\t) {\n\t\te.state_unsafe_mutation();\n\t}\n\n\tlet new_value = should_proxy ? proxy(value) : value;\n\n\tif (DEV) {\n\t\ttag_proxy(new_value, /** @type {string} */ (source.label));\n\t}\n\n\treturn internal_set(source, new_value, legacy_updates);\n}\n\n/**\n * @template V\n * @param {Source} source\n * @param {V} value\n * @param {Effect[] | null} [updated_during_traversal]\n * @returns {V}\n */\nexport function internal_set(source, value, updated_during_traversal = null) {\n\tif (!source.equals(value)) {\n\t\told_values.set(source, is_destroying_effect ? value : source.v);\n\n\t\tvar batch = Batch.ensure();\n\t\tbatch.capture(source, value);\n\n\t\tif (DEV) {\n\t\t\tif (tracing_mode_flag || active_effect !== null) {\n\t\t\t\tsource.updated ??= new Map();\n\n\t\t\t\t// For performance reasons, when not using $inspect.trace, we only start collecting stack traces\n\t\t\t\t// after the same source has been updated more than 5 times in the same flush cycle.\n\t\t\t\tconst count = (source.updated.get('')?.count ?? 
0) + 1;\n\t\t\t\tsource.updated.set('', { error: /** @type {any} */ (null), count });\n\n\t\t\t\tif (tracing_mode_flag || count > 5) {\n\t\t\t\t\tconst error = get_error('updated at');\n\n\t\t\t\t\tif (error !== null) {\n\t\t\t\t\t\tlet entry = source.updated.get(error.stack);\n\n\t\t\t\t\t\tif (!entry) {\n\t\t\t\t\t\t\tentry = { error, count: 0 };\n\t\t\t\t\t\t\tsource.updated.set(error.stack, entry);\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tentry.count++;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (active_effect !== null) {\n\t\t\t\tsource.set_during_effect = true;\n\t\t\t}\n\t\t}\n\n\t\tif ((source.f & DERIVED) !== 0) {\n\t\t\tconst derived = /** @type {Derived} */ (source);\n\n\t\t\t// if we are assigning to a dirty derived we set it to clean/maybe dirty but we also eagerly execute it to track the dependencies\n\t\t\tif ((source.f & DIRTY) !== 0) {\n\t\t\t\texecute_derived(derived);\n\t\t\t}\n\n\t\t\t// During time traveling we don't want to reset the status so that\n\t\t\t// traversal of the graph in the other batches still happens\n\t\t\tif (batch_values === null) {\n\t\t\t\tupdate_derived_status(derived);\n\t\t\t}\n\t\t}\n\n\t\tsource.wv = increment_write_version();\n\n\t\t// For debugging, in case you want to know which reactions are being scheduled:\n\t\t// log_reactions(source);\n\t\tmark_reactions(source, DIRTY, updated_during_traversal);\n\n\t\t// It's possible that the current reaction might not have up-to-date dependencies\n\t\t// whilst it's actively running. So in the case of ensuring it registers the reaction\n\t\t// properly for itself, we need to ensure the current effect actually gets\n\t\t// scheduled. i.e: `$effect(() => x++)`\n\t\tif (\n\t\t\tis_runes() &&\n\t\t\tactive_effect !== null &&\n\t\t\t(active_effect.f & CLEAN) !== 0 &&\n\t\t\t(active_effect.f & (BRANCH_EFFECT | ROOT_EFFECT)) === 0\n\t\t) {\n\t\t\tif (untracked_writes === null) {\n\t\t\t\tset_untracked_writes([source]);\n\t\t\t} else {\n\t\t\t\tuntracked_writes.push(source);\n\t\t\t}\n\t\t}\n\n\t\tif (!batch.is_fork && eager_effects.size > 0 && !eager_effects_deferred) {\n\t\t\tflush_eager_effects();\n\t\t}\n\t}\n\n\treturn value;\n}\n\nexport function flush_eager_effects() {\n\teager_effects_deferred = false;\n\n\tfor (const effect of eager_effects) {\n\t\t// Mark clean inspect-effects as maybe dirty and then check their dirtiness\n\t\t// instead of just updating the effects - this way we avoid overfiring.\n\t\tif ((effect.f & CLEAN) !== 0) {\n\t\t\tset_signal_status(effect, MAYBE_DIRTY);\n\t\t}\n\n\t\tif (is_dirty(effect)) {\n\t\t\tupdate_effect(effect);\n\t\t}\n\t}\n\n\teager_effects.clear();\n}\n\n/**\n * @template {number | bigint} T\n * @param {Source} source\n * @param {1 | -1} [d]\n * @returns {T}\n */\nexport function update(source, d = 1) {\n\tvar value = get(source);\n\tvar result = d === 1 ? value++ : value--;\n\n\tset(source, value);\n\n\t// @ts-expect-error\n\treturn result;\n}\n\n/**\n * @template {number | bigint} T\n * @param {Source} source\n * @param {1 | -1} [d]\n * @returns {T}\n */\nexport function update_pre(source, d = 1) {\n\tvar value = get(source);\n\n\t// @ts-expect-error\n\t// eslint-disable-next-line no-useless-assignment -- `++`/`--` used for return value, not side effect on `value`\n\treturn set(source, d === 1 ? 
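// Editor's note — behavioural sketch of `update` / `update_pre` above, using
// a plain object in place of a reactive source: `update` implements `count++`
// (returns the old value), `update_pre` implements `++count` (returns the new).
const s = { v: 1 };
const post = s.v++; // like update(source):     post === 1, s.v === 2
const pre = ++s.v;  // like update_pre(source): pre  === 3, s.v === 3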
++value : --value);\n}\n\n/**\n * Silently (without using `get`) increment a source\n * @param {Source} source\n */\nexport function increment(source) {\n\tset(source, source.v + 1);\n}\n\n/**\n * @param {Value} signal\n * @param {number} status should be DIRTY or MAYBE_DIRTY\n * @param {Effect[] | null} updated_during_traversal\n * @returns {void}\n */\nfunction mark_reactions(signal, status, updated_during_traversal) {\n\tvar reactions = signal.reactions;\n\tif (reactions === null) return;\n\n\tvar runes = is_runes();\n\tvar length = reactions.length;\n\n\tfor (var i = 0; i < length; i++) {\n\t\tvar reaction = reactions[i];\n\t\tvar flags = reaction.f;\n\n\t\t// In legacy mode, skip the current effect to prevent infinite loops\n\t\tif (!runes && reaction === active_effect) continue;\n\n\t\t// Inspect effects need to run immediately, so that the stack trace makes sense\n\t\tif (DEV && (flags & EAGER_EFFECT) !== 0) {\n\t\t\teager_effects.add(reaction);\n\t\t\tcontinue;\n\t\t}\n\n\t\tvar not_dirty = (flags & DIRTY) === 0;\n\n\t\t// don't set a DIRTY reaction to MAYBE_DIRTY\n\t\tif (not_dirty) {\n\t\t\tset_signal_status(reaction, status);\n\t\t}\n\n\t\tif ((flags & DERIVED) !== 0) {\n\t\t\tvar derived = /** @type {Derived} */ (reaction);\n\n\t\t\tbatch_values?.delete(derived);\n\n\t\t\tif ((flags & WAS_MARKED) === 0) {\n\t\t\t\t// Only connected deriveds being executed outside the update cycle can be reliably unmarked right away\n\t\t\t\tif (\n\t\t\t\t\tflags & CONNECTED &&\n\t\t\t\t\t(active_effect === null || (active_effect.f & REACTION_IS_UPDATING) === 0)\n\t\t\t\t) {\n\t\t\t\t\treaction.f |= WAS_MARKED;\n\t\t\t\t}\n\n\t\t\t\tmark_reactions(derived, MAYBE_DIRTY, updated_during_traversal);\n\t\t\t}\n\t\t} else if (not_dirty) {\n\t\t\tvar effect = /** @type {Effect} */ (reaction);\n\n\t\t\tif ((flags & BLOCK_EFFECT) !== 0 && eager_block_effects !== null) {\n\t\t\t\teager_block_effects.add(effect);\n\t\t\t}\n\n\t\t\tif (updated_during_traversal !== null) {\n\t\t\t\tupdated_during_traversal.push(effect);\n\t\t\t} else {\n\t\t\t\tschedule_effect(effect);\n\t\t\t}\n\t\t}\n\t}\n}\n","/** @import { Source } from '#client' */\nimport { DEV } from 'esm-env';\nimport {\n\tget,\n\tactive_effect,\n\tupdate_version,\n\tactive_reaction,\n\tset_update_version,\n\tset_active_reaction\n} from './runtime.js';\nimport {\n\tarray_prototype,\n\tget_descriptor,\n\tget_prototype_of,\n\tis_array,\n\tobject_prototype\n} from '../shared/utils.js';\nimport {\n\tstate as source,\n\tset,\n\tincrement,\n\tflush_eager_effects,\n\tset_eager_effects_deferred\n} from './reactivity/sources.js';\nimport { PROXY_PATH_SYMBOL, STATE_SYMBOL } from '#client/constants';\nimport { UNINITIALIZED } from '../../constants.js';\nimport * as e from './errors.js';\nimport { tag } from './dev/tracing.js';\nimport { get_error } from '../shared/dev.js';\nimport { tracing_mode_flag } from '../flags/index.js';\n\n// TODO move all regexes into shared module?\nconst regex_is_valid_identifier = /^[a-zA-Z_$][a-zA-Z_$0-9]*$/;\n\n/**\n * @template T\n * @param {T} value\n * @returns {T}\n */\nexport function proxy(value) {\n\t// if non-proxyable, or is already a proxy, return `value`\n\tif (typeof value !== 'object' || value === null || STATE_SYMBOL in value) {\n\t\treturn value;\n\t}\n\n\tconst prototype = get_prototype_of(value);\n\n\tif (prototype !== object_prototype && prototype !== array_prototype) {\n\t\treturn value;\n\t}\n\n\t/** @type {Map>} */\n\tvar sources = new Map();\n\tvar is_proxied_array = is_array(value);\n\tvar version = 
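// Editor's note — deliberately simplified sketch (not the real
// implementation) of the DIRTY / MAYBE_DIRTY split in `mark_reactions` above:
// direct reactions of a changed source are marked DIRTY, but anything reached
// through a derived is only MAYBE_DIRTY, so it re-checks its dependencies
// before re-running, and a derived whose new value is `equals` to the old one
// stops the wave. `schedule` is a hypothetical stand-in for the batch scheduler.
function invalidate(signal, status = 'dirty') {
	for (const reaction of signal.reactions ?? []) {
		if (reaction.status !== 'dirty') reaction.status = status;

		if (reaction.kind === 'derived') {
			invalidate(reaction, 'maybe_dirty');
		} else {
			schedule(reaction);
		}
	}
}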
source(0);\n\n\tvar stack = DEV && tracing_mode_flag ? get_error('created at') : null;\n\tvar parent_version = update_version;\n\n\t/**\n\t * Executes the proxy in the context of the reaction it was originally created in, if any\n\t * @template T\n\t * @param {() => T} fn\n\t */\n\tvar with_parent = (fn) => {\n\t\tif (update_version === parent_version) {\n\t\t\treturn fn();\n\t\t}\n\n\t\t// child source is being created after the initial proxy —\n\t\t// prevent it from being associated with the current reaction\n\t\tvar reaction = active_reaction;\n\t\tvar version = update_version;\n\n\t\tset_active_reaction(null);\n\t\tset_update_version(parent_version);\n\n\t\tvar result = fn();\n\n\t\tset_active_reaction(reaction);\n\t\tset_update_version(version);\n\n\t\treturn result;\n\t};\n\n\tif (is_proxied_array) {\n\t\t// We need to create the length source eagerly to ensure that\n\t\t// mutations to the array are properly synced with our proxy\n\t\tsources.set('length', source(/** @type {any[]} */ (value).length, stack));\n\t\tif (DEV) {\n\t\t\tvalue = /** @type {any} */ (inspectable_array(/** @type {any[]} */ (value)));\n\t\t}\n\t}\n\n\t/** Used in dev for $inspect.trace() */\n\tvar path = '';\n\tlet updating = false;\n\t/** @param {string} new_path */\n\tfunction update_path(new_path) {\n\t\tif (updating) return;\n\t\tupdating = true;\n\t\tpath = new_path;\n\n\t\ttag(version, `${path} version`);\n\n\t\t// rename all child sources and child proxies\n\t\tfor (const [prop, source] of sources) {\n\t\t\ttag(source, get_label(path, prop));\n\t\t}\n\t\tupdating = false;\n\t}\n\n\treturn new Proxy(/** @type {any} */ (value), {\n\t\tdefineProperty(_, prop, descriptor) {\n\t\t\tif (\n\t\t\t\t!('value' in descriptor) ||\n\t\t\t\tdescriptor.configurable === false ||\n\t\t\t\tdescriptor.enumerable === false ||\n\t\t\t\tdescriptor.writable === false\n\t\t\t) {\n\t\t\t\t// we disallow non-basic descriptors, because unless they are applied to the\n\t\t\t\t// target object — which we avoid, so that state can be forked — we will run\n\t\t\t\t// afoul of the various invariants\n\t\t\t\t// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Proxy/Proxy/getOwnPropertyDescriptor#invariants\n\t\t\t\te.state_descriptors_fixed();\n\t\t\t}\n\t\t\tvar s = sources.get(prop);\n\t\t\tif (s === undefined) {\n\t\t\t\twith_parent(() => {\n\t\t\t\t\tvar s = source(descriptor.value, stack);\n\t\t\t\t\tsources.set(prop, s);\n\t\t\t\t\tif (DEV && typeof prop === 'string') {\n\t\t\t\t\t\ttag(s, get_label(path, prop));\n\t\t\t\t\t}\n\t\t\t\t\treturn s;\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tset(s, descriptor.value, true);\n\t\t\t}\n\n\t\t\treturn true;\n\t\t},\n\n\t\tdeleteProperty(target, prop) {\n\t\t\tvar s = sources.get(prop);\n\n\t\t\tif (s === undefined) {\n\t\t\t\tif (prop in target) {\n\t\t\t\t\tconst s = with_parent(() => source(UNINITIALIZED, stack));\n\t\t\t\t\tsources.set(prop, s);\n\t\t\t\t\tincrement(version);\n\n\t\t\t\t\tif (DEV) {\n\t\t\t\t\t\ttag(s, get_label(path, prop));\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tset(s, UNINITIALIZED);\n\t\t\t\tincrement(version);\n\t\t\t}\n\n\t\t\treturn true;\n\t\t},\n\n\t\tget(target, prop, receiver) {\n\t\t\tif (prop === STATE_SYMBOL) {\n\t\t\t\treturn value;\n\t\t\t}\n\n\t\t\tif (DEV && prop === PROXY_PATH_SYMBOL) {\n\t\t\t\treturn update_path;\n\t\t\t}\n\n\t\t\tvar s = sources.get(prop);\n\t\t\tvar exists = prop in target;\n\n\t\t\t// create a source, but only if it's an own property and not a prototype property\n\t\t\tif (s === undefined 
&& (!exists || get_descriptor(target, prop)?.writable)) {\n\t\t\t\ts = with_parent(() => {\n\t\t\t\t\tvar p = proxy(exists ? target[prop] : UNINITIALIZED);\n\t\t\t\t\tvar s = source(p, stack);\n\n\t\t\t\t\tif (DEV) {\n\t\t\t\t\t\ttag(s, get_label(path, prop));\n\t\t\t\t\t}\n\n\t\t\t\t\treturn s;\n\t\t\t\t});\n\n\t\t\t\tsources.set(prop, s);\n\t\t\t}\n\n\t\t\tif (s !== undefined) {\n\t\t\t\tvar v = get(s);\n\t\t\t\treturn v === UNINITIALIZED ? undefined : v;\n\t\t\t}\n\n\t\t\treturn Reflect.get(target, prop, receiver);\n\t\t},\n\n\t\tgetOwnPropertyDescriptor(target, prop) {\n\t\t\tvar descriptor = Reflect.getOwnPropertyDescriptor(target, prop);\n\n\t\t\tif (descriptor && 'value' in descriptor) {\n\t\t\t\tvar s = sources.get(prop);\n\t\t\t\tif (s) descriptor.value = get(s);\n\t\t\t} else if (descriptor === undefined) {\n\t\t\t\tvar source = sources.get(prop);\n\t\t\t\tvar value = source?.v;\n\n\t\t\t\tif (source !== undefined && value !== UNINITIALIZED) {\n\t\t\t\t\treturn {\n\t\t\t\t\t\tenumerable: true,\n\t\t\t\t\t\tconfigurable: true,\n\t\t\t\t\t\tvalue,\n\t\t\t\t\t\twritable: true\n\t\t\t\t\t};\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn descriptor;\n\t\t},\n\n\t\thas(target, prop) {\n\t\t\tif (prop === STATE_SYMBOL) {\n\t\t\t\treturn true;\n\t\t\t}\n\n\t\t\tvar s = sources.get(prop);\n\t\t\tvar has = (s !== undefined && s.v !== UNINITIALIZED) || Reflect.has(target, prop);\n\n\t\t\tif (\n\t\t\t\ts !== undefined ||\n\t\t\t\t(active_effect !== null && (!has || get_descriptor(target, prop)?.writable))\n\t\t\t) {\n\t\t\t\tif (s === undefined) {\n\t\t\t\t\ts = with_parent(() => {\n\t\t\t\t\t\tvar p = has ? proxy(target[prop]) : UNINITIALIZED;\n\t\t\t\t\t\tvar s = source(p, stack);\n\n\t\t\t\t\t\tif (DEV) {\n\t\t\t\t\t\t\ttag(s, get_label(path, prop));\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\treturn s;\n\t\t\t\t\t});\n\n\t\t\t\t\tsources.set(prop, s);\n\t\t\t\t}\n\n\t\t\t\tvar value = get(s);\n\t\t\t\tif (value === UNINITIALIZED) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn has;\n\t\t},\n\n\t\tset(target, prop, value, receiver) {\n\t\t\tvar s = sources.get(prop);\n\t\t\tvar has = prop in target;\n\n\t\t\t// variable.length = value -> clear all signals with index >= value\n\t\t\tif (is_proxied_array && prop === 'length') {\n\t\t\t\tfor (var i = value; i < /** @type {Source} */ (s).v; i += 1) {\n\t\t\t\t\tvar other_s = sources.get(i + '');\n\t\t\t\t\tif (other_s !== undefined) {\n\t\t\t\t\t\tset(other_s, UNINITIALIZED);\n\t\t\t\t\t} else if (i in target) {\n\t\t\t\t\t\t// If the item exists in the original, we need to create an uninitialized source,\n\t\t\t\t\t\t// else a later read of the property would result in a source being created with\n\t\t\t\t\t\t// the value of the original item at that index.\n\t\t\t\t\t\tother_s = with_parent(() => source(UNINITIALIZED, stack));\n\t\t\t\t\t\tsources.set(i + '', other_s);\n\n\t\t\t\t\t\tif (DEV) {\n\t\t\t\t\t\t\ttag(other_s, get_label(path, i));\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// If we haven't yet created a source for this property, we need to ensure\n\t\t\t// we do so otherwise if we read it later, then the write won't be tracked and\n\t\t\t// the heuristics of effects will be different vs if we had read the proxied\n\t\t\t// object property before writing to that property.\n\t\t\tif (s === undefined) {\n\t\t\t\tif (!has || get_descriptor(target, prop)?.writable) {\n\t\t\t\t\ts = with_parent(() => source(undefined, stack));\n\n\t\t\t\t\tif (DEV) {\n\t\t\t\t\t\ttag(s, get_label(path, prop));\n\t\t\t\t\t}\n\t\t\t\t\tset(s, 
proxy(value));\n\n\t\t\t\t\tsources.set(prop, s);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\thas = s.v !== UNINITIALIZED;\n\n\t\t\t\tvar p = with_parent(() => proxy(value));\n\t\t\t\tset(s, p);\n\t\t\t}\n\n\t\t\tvar descriptor = Reflect.getOwnPropertyDescriptor(target, prop);\n\n\t\t\t// Set the new value before updating any signals so that any listeners get the new value\n\t\t\tif (descriptor?.set) {\n\t\t\t\tdescriptor.set.call(receiver, value);\n\t\t\t}\n\n\t\t\tif (!has) {\n\t\t\t\t// If we have mutated an array directly, we might need to\n\t\t\t\t// signal that length has also changed. Do it before updating metadata\n\t\t\t\t// to ensure that iterating over the array as a result of a metadata update\n\t\t\t\t// will not cause the length to be out of sync.\n\t\t\t\tif (is_proxied_array && typeof prop === 'string') {\n\t\t\t\t\tvar ls = /** @type {Source} */ (sources.get('length'));\n\t\t\t\t\tvar n = Number(prop);\n\n\t\t\t\t\tif (Number.isInteger(n) && n >= ls.v) {\n\t\t\t\t\t\tset(ls, n + 1);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tincrement(version);\n\t\t\t}\n\n\t\t\treturn true;\n\t\t},\n\n\t\townKeys(target) {\n\t\t\tget(version);\n\n\t\t\tvar own_keys = Reflect.ownKeys(target).filter((key) => {\n\t\t\t\tvar source = sources.get(key);\n\t\t\t\treturn source === undefined || source.v !== UNINITIALIZED;\n\t\t\t});\n\n\t\t\tfor (var [key, source] of sources) {\n\t\t\t\tif (source.v !== UNINITIALIZED && !(key in target)) {\n\t\t\t\t\town_keys.push(key);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn own_keys;\n\t\t},\n\n\t\tsetPrototypeOf() {\n\t\t\te.state_prototype_fixed();\n\t\t}\n\t});\n}\n\n/**\n * @param {string} path\n * @param {string | symbol} prop\n */\nfunction get_label(path, prop) {\n\tif (typeof prop === 'symbol') return `${path}[Symbol(${prop.description ?? ''})]`;\n\tif (regex_is_valid_identifier.test(prop)) return `${path}.${prop}`;\n\treturn /^\\d+$/.test(prop) ? `${path}[${prop}]` : `${path}['${prop}']`;\n}\n\n/**\n * @param {any} value\n */\nexport function get_proxied_value(value) {\n\ttry {\n\t\tif (value !== null && typeof value === 'object' && STATE_SYMBOL in value) {\n\t\t\treturn value[STATE_SYMBOL];\n\t\t}\n\t} catch {\n\t\t// the above if check can throw an error if the value in question\n\t\t// is the contentWindow of an iframe on another domain, in which\n\t\t// case we want to just return the value (because it's definitely\n\t\t// not a proxied value) so we don't break any JavaScript interacting\n\t\t// with that iframe (such as various payment companies client side\n\t\t// JavaScript libraries interacting with their iframes on the same\n\t\t// domain)\n\t}\n\n\treturn value;\n}\n\n/**\n * @param {any} a\n * @param {any} b\n */\nexport function is(a, b) {\n\treturn Object.is(get_proxied_value(a), get_proxied_value(b));\n}\n\nconst ARRAY_MUTATING_METHODS = new Set([\n\t'copyWithin',\n\t'fill',\n\t'pop',\n\t'push',\n\t'reverse',\n\t'shift',\n\t'sort',\n\t'splice',\n\t'unshift'\n]);\n\n/**\n * Wrap array mutating methods so $inspect is triggered only once and\n * to prevent logging an array in intermediate state (e.g. 
with an empty slot)\n * @param {any[]} array\n */\nfunction inspectable_array(array) {\n\treturn new Proxy(array, {\n\t\tget(target, prop, receiver) {\n\t\t\tvar value = Reflect.get(target, prop, receiver);\n\t\t\tif (!ARRAY_MUTATING_METHODS.has(/** @type {string} */ (prop))) {\n\t\t\t\treturn value;\n\t\t\t}\n\n\t\t\t/**\n\t\t\t * @this {any[]}\n\t\t\t * @param {any[]} args\n\t\t\t */\n\t\t\treturn function (...args) {\n\t\t\t\tset_eager_effects_deferred();\n\t\t\t\tvar result = value.apply(this, args);\n\t\t\t\tflush_eager_effects();\n\t\t\t\treturn result;\n\t\t\t};\n\t\t}\n\t});\n}\n","/** @import { Effect, TemplateNode } from '#client' */\nimport { hydrate_node, hydrating, set_hydrate_node } from './hydration.js';\nimport { DEV } from 'esm-env';\nimport { init_array_prototype_warnings } from '../dev/equality.js';\nimport { get_descriptor, is_extensible } from '../../shared/utils.js';\nimport { active_effect } from '../runtime.js';\nimport { async_mode_flag } from '../../flags/index.js';\nimport { TEXT_NODE, REACTION_RAN } from '#client/constants';\nimport { eager_block_effects } from '../reactivity/batch.js';\nimport { NAMESPACE_HTML } from '../../../constants.js';\n\n// export these for reference in the compiled code, making global name deduplication unnecessary\n/** @type {Window} */\nexport var $window;\n\n/** @type {Document} */\nexport var $document;\n\n/** @type {boolean} */\nexport var is_firefox;\n\n/** @type {() => Node | null} */\nvar first_child_getter;\n/** @type {() => Node | null} */\nvar next_sibling_getter;\n\n/**\n * Initialize these lazily to avoid issues when using the runtime in a server context\n * where these globals are not available while avoiding a separate server entry point\n */\nexport function init_operations() {\n\tif ($window !== undefined) {\n\t\treturn;\n\t}\n\n\t$window = window;\n\t$document = document;\n\tis_firefox = /Firefox/.test(navigator.userAgent);\n\n\tvar element_prototype = Element.prototype;\n\tvar node_prototype = Node.prototype;\n\tvar text_prototype = Text.prototype;\n\n\t// @ts-ignore\n\tfirst_child_getter = get_descriptor(node_prototype, 'firstChild').get;\n\t// @ts-ignore\n\tnext_sibling_getter = get_descriptor(node_prototype, 'nextSibling').get;\n\n\tif (is_extensible(element_prototype)) {\n\t\t// the following assignments improve perf of lookups on DOM nodes\n\t\t// @ts-expect-error\n\t\telement_prototype.__click = undefined;\n\t\t// @ts-expect-error\n\t\telement_prototype.__className = undefined;\n\t\t// @ts-expect-error\n\t\telement_prototype.__attributes = null;\n\t\t// @ts-expect-error\n\t\telement_prototype.__style = undefined;\n\t\t// @ts-expect-error\n\t\telement_prototype.__e = undefined;\n\t}\n\n\tif (is_extensible(text_prototype)) {\n\t\t// @ts-expect-error\n\t\ttext_prototype.__t = undefined;\n\t}\n\n\tif (DEV) {\n\t\t// @ts-expect-error\n\t\telement_prototype.__svelte_meta = null;\n\n\t\tinit_array_prototype_warnings();\n\t}\n}\n\n/**\n * @param {string} value\n * @returns {Text}\n */\nexport function create_text(value = '') {\n\treturn document.createTextNode(value);\n}\n\n/**\n * @template {Node} N\n * @param {N} node\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function get_first_child(node) {\n\treturn /** @type {TemplateNode | null} */ (first_child_getter.call(node));\n}\n\n/**\n * @template {Node} N\n * @param {N} node\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function get_next_sibling(node) {\n\treturn /** @type {TemplateNode | null} */ (next_sibling_getter.call(node));\n}\n\n/**\n * Don't mark this as side-effect-free, 
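// Editor's note — usage-level sketch. In DEV, `inspectable_array` wraps the
// mutating array methods so eager ($inspect) effects are deferred for the
// duration of the call and flushed once afterwards — `push` writes both the
// new index and `length`, but observers fire a single time:
const arr = proxy([1, 2, 3]);
arr.push(4); // one flush; index "3" and "length" update together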
hydration needs to walk all nodes\n * @template {Node} N\n * @param {N} node\n * @param {boolean} is_text\n * @returns {TemplateNode | null}\n */\nexport function child(node, is_text) {\n\tif (!hydrating) {\n\t\treturn get_first_child(node);\n\t}\n\n\tvar child = get_first_child(hydrate_node);\n\n\t// Child can be null if we have an element with a single child, like `
<p>{text}</p>
                `, where `text` is empty\n\tif (child === null) {\n\t\tchild = hydrate_node.appendChild(create_text());\n\t} else if (is_text && child.nodeType !== TEXT_NODE) {\n\t\tvar text = create_text();\n\t\tchild?.before(text);\n\t\tset_hydrate_node(text);\n\t\treturn text;\n\t}\n\n\tif (is_text) {\n\t\tmerge_text_nodes(/** @type {Text} */ (child));\n\t}\n\n\tset_hydrate_node(child);\n\treturn child;\n}\n\n/**\n * Don't mark this as side-effect-free, hydration needs to walk all nodes\n * @param {TemplateNode} node\n * @param {boolean} [is_text]\n * @returns {TemplateNode | null}\n */\nexport function first_child(node, is_text = false) {\n\tif (!hydrating) {\n\t\tvar first = get_first_child(node);\n\n\t\t// TODO prevent user comments with the empty string when preserveComments is true\n\t\tif (first instanceof Comment && first.data === '') return get_next_sibling(first);\n\n\t\treturn first;\n\t}\n\n\tif (is_text) {\n\t\t// if an {expression} is empty during SSR, there might be no\n\t\t// text node to hydrate — we must therefore create one\n\t\tif (hydrate_node?.nodeType !== TEXT_NODE) {\n\t\t\tvar text = create_text();\n\n\t\t\thydrate_node?.before(text);\n\t\t\tset_hydrate_node(text);\n\t\t\treturn text;\n\t\t}\n\n\t\tmerge_text_nodes(/** @type {Text} */ (hydrate_node));\n\t}\n\n\treturn hydrate_node;\n}\n\n/**\n * Don't mark this as side-effect-free, hydration needs to walk all nodes\n * @param {TemplateNode} node\n * @param {number} count\n * @param {boolean} is_text\n * @returns {TemplateNode | null}\n */\nexport function sibling(node, count = 1, is_text = false) {\n\tlet next_sibling = hydrating ? hydrate_node : node;\n\tvar last_sibling;\n\n\twhile (count--) {\n\t\tlast_sibling = next_sibling;\n\t\tnext_sibling = /** @type {TemplateNode} */ (get_next_sibling(next_sibling));\n\t}\n\n\tif (!hydrating) {\n\t\treturn next_sibling;\n\t}\n\n\tif (is_text) {\n\t\t// if a sibling {expression} is empty during SSR, there might be no\n\t\t// text node to hydrate — we must therefore create one\n\t\tif (next_sibling?.nodeType !== TEXT_NODE) {\n\t\t\tvar text = create_text();\n\t\t\t// If the next sibling is `null` and we're handling text then it's because\n\t\t\t// the SSR content was empty for the text, so we need to generate a new text\n\t\t\t// node and insert it after the last sibling\n\t\t\tif (next_sibling === null) {\n\t\t\t\tlast_sibling?.after(text);\n\t\t\t} else {\n\t\t\t\tnext_sibling.before(text);\n\t\t\t}\n\t\t\tset_hydrate_node(text);\n\t\t\treturn text;\n\t\t}\n\n\t\tmerge_text_nodes(/** @type {Text} */ (next_sibling));\n\t}\n\n\tset_hydrate_node(next_sibling);\n\treturn next_sibling;\n}\n\n/**\n * @template {Node} N\n * @param {N} node\n * @returns {void}\n */\nexport function clear_text_content(node) {\n\tnode.textContent = '';\n}\n\n/**\n * Returns `true` if we're updating the current block, for example `condition` in\n * an `{#if condition}` block just changed. In this case, the branch should be\n * appended (or removed) at the same time as other updates within the\n * current ``\n */\nexport function should_defer_append() {\n\tif (!async_mode_flag) return false;\n\tif (eager_block_effects !== null) return false;\n\n\tvar flags = /** @type {Effect} */ (active_effect).f;\n\treturn (flags & REACTION_RAN) !== 0;\n}\n\n/**\n * @template {keyof HTMLElementTagNameMap | string} T\n * @param {T} tag\n * @param {string} [namespace]\n * @param {string} [is]\n * @returns {T extends keyof HTMLElementTagNameMap ? 
HTMLElementTagNameMap[T] : Element}\n */\nexport function create_element(tag, namespace, is) {\n\tlet options = is ? { is } : undefined;\n\treturn /** @type {T extends keyof HTMLElementTagNameMap ? HTMLElementTagNameMap[T] : Element} */ (\n\t\tdocument.createElementNS(namespace ?? NAMESPACE_HTML, tag, options)\n\t);\n}\n\nexport function create_fragment() {\n\treturn document.createDocumentFragment();\n}\n\n/**\n * @param {string} data\n * @returns\n */\nexport function create_comment(data = '') {\n\treturn document.createComment(data);\n}\n\n/**\n * @param {Element} element\n * @param {string} key\n * @param {string} value\n * @returns\n */\nexport function set_attribute(element, key, value = '') {\n\tif (key.startsWith('xlink:')) {\n\t\telement.setAttributeNS('http://www.w3.org/1999/xlink', key, value);\n\t\treturn;\n\t}\n\treturn element.setAttribute(key, value);\n}\n\n/**\n * Browsers split text nodes larger than 65536 bytes when parsing.\n * For hydration to succeed, we need to stitch them back together\n * @param {Text} text\n */\nexport function merge_text_nodes(text) {\n\tif (/** @type {string} */ (text.nodeValue).length < 65536) {\n\t\treturn;\n\t}\n\n\tlet next = text.nextSibling;\n\n\twhile (next !== null && next.nodeType === TEXT_NODE) {\n\t\tnext.remove();\n\n\t\t/** @type {string} */ (text.nodeValue) += /** @type {string} */ (next.nodeValue);\n\n\t\tnext = text.nextSibling;\n\t}\n}\n","import { teardown } from '../../../reactivity/effects.js';\nimport {\n\tactive_effect,\n\tactive_reaction,\n\tset_active_effect,\n\tset_active_reaction\n} from '../../../runtime.js';\nimport { add_form_reset_listener } from '../misc.js';\n\n/**\n * Fires the handler once immediately (unless corresponding arg is set to `false`),\n * then listens to the given events until the render effect context is destroyed\n * @param {EventTarget} target\n * @param {Array} events\n * @param {(event?: Event) => void} handler\n * @param {any} call_handler_immediately\n */\nexport function listen(target, events, handler, call_handler_immediately = true) {\n\tif (call_handler_immediately) {\n\t\thandler();\n\t}\n\n\tfor (var name of events) {\n\t\ttarget.addEventListener(name, handler);\n\t}\n\n\tteardown(() => {\n\t\tfor (var name of events) {\n\t\t\ttarget.removeEventListener(name, handler);\n\t\t}\n\t});\n}\n\n/**\n * @template T\n * @param {() => T} fn\n */\nexport function without_reactive_context(fn) {\n\tvar previous_reaction = active_reaction;\n\tvar previous_effect = active_effect;\n\tset_active_reaction(null);\n\tset_active_effect(null);\n\ttry {\n\t\treturn fn();\n\t} finally {\n\t\tset_active_reaction(previous_reaction);\n\t\tset_active_effect(previous_effect);\n\t}\n}\n\n/**\n * Listen to the given event, and then instantiate a global form reset listener if not already done,\n * to notify all bindings when the form is reset\n * @param {HTMLElement} element\n * @param {string} event\n * @param {(is_reset?: true) => void} handler\n * @param {(is_reset?: true) => void} [on_reset]\n */\nexport function listen_to_event_and_reset_event(element, event, handler, on_reset = handler) {\n\telement.addEventListener(event, () => without_reactive_context(handler));\n\t// @ts-expect-error\n\tconst prev = element.__on_r;\n\tif (prev) {\n\t\t// special case for checkbox that can have multiple binds (group & checked)\n\t\t// @ts-expect-error\n\t\telement.__on_r = () => {\n\t\t\tprev();\n\t\t\ton_reset(true);\n\t\t};\n\t} else {\n\t\t// @ts-expect-error\n\t\telement.__on_r = () => 
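// Editor's note — why `merge_text_nodes` above exists, observable in a
// browser console (exact limits may vary by engine): parsers split text nodes
// at 65536 characters, so one server-rendered expression can hydrate as
// several adjacent Text nodes that must be stitched back together.
const div = document.createElement('div');
div.innerHTML = 'x'.repeat(70_000);
console.log(div.childNodes.length); // typically 2 — merge_text_nodes rejoins them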
on_reset(true);\n\t}\n\n\tadd_form_reset_listener();\n}\n","/** @import { Blocker, ComponentContext, ComponentContextLegacy, Derived, Effect, TemplateNode, TransitionManager } from '#client' */\nimport {\n\tis_dirty,\n\tactive_effect,\n\tactive_reaction,\n\tupdate_effect,\n\tget,\n\tis_destroying_effect,\n\tremove_reactions,\n\tset_active_reaction,\n\tset_is_destroying_effect,\n\tuntrack,\n\tuntracking,\n\tset_active_effect\n} from '../runtime.js';\nimport {\n\tDIRTY,\n\tBRANCH_EFFECT,\n\tRENDER_EFFECT,\n\tEFFECT,\n\tDESTROYED,\n\tINERT,\n\tREACTION_RAN,\n\tBLOCK_EFFECT,\n\tROOT_EFFECT,\n\tEFFECT_TRANSPARENT,\n\tDERIVED,\n\tCLEAN,\n\tEAGER_EFFECT,\n\tHEAD_EFFECT,\n\tMAYBE_DIRTY,\n\tEFFECT_PRESERVED,\n\tSTALE_REACTION,\n\tUSER_EFFECT,\n\tASYNC,\n\tCONNECTED,\n\tMANAGED_EFFECT,\n\tDESTROYING\n} from '#client/constants';\nimport * as e from '../errors.js';\nimport { DEV } from 'esm-env';\nimport { define_property } from '../../shared/utils.js';\nimport { get_next_sibling } from '../dom/operations.js';\nimport { component_context, dev_current_component_function, dev_stack } from '../context.js';\nimport { Batch, collected_effects, current_batch } from './batch.js';\nimport { flatten, increment_pending } from './async.js';\nimport { without_reactive_context } from '../dom/elements/bindings/shared.js';\nimport { set_signal_status } from './status.js';\n\n/**\n * @param {'$effect' | '$effect.pre' | '$inspect'} rune\n */\nexport function validate_effect(rune) {\n\tif (active_effect === null) {\n\t\tif (active_reaction === null) {\n\t\t\te.effect_orphan(rune);\n\t\t}\n\n\t\te.effect_in_unowned_derived();\n\t}\n\n\tif (is_destroying_effect) {\n\t\te.effect_in_teardown(rune);\n\t}\n}\n\n/**\n * @param {Effect} effect\n * @param {Effect} parent_effect\n */\nfunction push_effect(effect, parent_effect) {\n\tvar parent_last = parent_effect.last;\n\tif (parent_last === null) {\n\t\tparent_effect.last = parent_effect.first = effect;\n\t} else {\n\t\tparent_last.next = effect;\n\t\teffect.prev = parent_last;\n\t\tparent_effect.last = effect;\n\t}\n}\n\n/**\n * @param {number} type\n * @param {null | (() => void | (() => void))} fn\n * @returns {Effect}\n */\nfunction create_effect(type, fn) {\n\tvar parent = active_effect;\n\n\tif (DEV) {\n\t\t// Ensure the parent is never an inspect effect\n\t\twhile (parent !== null && (parent.f & EAGER_EFFECT) !== 0) {\n\t\t\tparent = parent.parent;\n\t\t}\n\t}\n\n\tif (parent !== null && (parent.f & INERT) !== 0) {\n\t\ttype |= INERT;\n\t}\n\n\t/** @type {Effect} */\n\tvar effect = {\n\t\tctx: component_context,\n\t\tdeps: null,\n\t\tnodes: null,\n\t\tf: type | DIRTY | CONNECTED,\n\t\tfirst: null,\n\t\tfn,\n\t\tlast: null,\n\t\tnext: null,\n\t\tparent,\n\t\tb: parent && parent.b,\n\t\tprev: null,\n\t\tteardown: null,\n\t\twv: 0,\n\t\tac: null\n\t};\n\n\tif (DEV) {\n\t\teffect.component_function = dev_current_component_function;\n\t}\n\n\tcurrent_batch?.register_created_effect(effect);\n\n\t/** @type {Effect | null} */\n\tvar e = effect;\n\n\tif ((type & EFFECT) !== 0) {\n\t\tif (collected_effects !== null) {\n\t\t\t// created during traversal — collect and run afterwards\n\t\t\tcollected_effects.push(effect);\n\t\t} else {\n\t\t\t// schedule for later\n\t\t\tBatch.ensure().schedule(effect);\n\t\t}\n\t} else if (fn !== null) {\n\t\ttry {\n\t\t\tupdate_effect(effect);\n\t\t} catch (e) {\n\t\t\tdestroy_effect(effect);\n\t\t\tthrow e;\n\t\t}\n\n\t\t// if an effect doesn't need to be kept in the tree (because it\n\t\t// won't re-run, has no DOM, and has no teardown 
etc)\n\t\t// then we skip it and go to its child (if any)\n\t\tif (\n\t\t\te.deps === null &&\n\t\t\te.teardown === null &&\n\t\t\te.nodes === null &&\n\t\t\te.first === e.last && // either `null`, or a singular child\n\t\t\t(e.f & EFFECT_PRESERVED) === 0\n\t\t) {\n\t\t\te = e.first;\n\t\t\tif ((type & BLOCK_EFFECT) !== 0 && (type & EFFECT_TRANSPARENT) !== 0 && e !== null) {\n\t\t\t\te.f |= EFFECT_TRANSPARENT;\n\t\t\t}\n\t\t}\n\t}\n\n\tif (e !== null) {\n\t\te.parent = parent;\n\n\t\tif (parent !== null) {\n\t\t\tpush_effect(e, parent);\n\t\t}\n\n\t\t// if we're in a derived, add the effect there too\n\t\tif (\n\t\t\tactive_reaction !== null &&\n\t\t\t(active_reaction.f & DERIVED) !== 0 &&\n\t\t\t(type & ROOT_EFFECT) === 0\n\t\t) {\n\t\t\tvar derived = /** @type {Derived} */ (active_reaction);\n\t\t\t(derived.effects ??= []).push(e);\n\t\t}\n\t}\n\n\treturn effect;\n}\n\n/**\n * Internal representation of `$effect.tracking()`\n * @returns {boolean}\n */\nexport function effect_tracking() {\n\treturn active_reaction !== null && !untracking;\n}\n\n/**\n * @param {() => void} fn\n */\nexport function teardown(fn) {\n\tconst effect = create_effect(RENDER_EFFECT, null);\n\tset_signal_status(effect, CLEAN);\n\teffect.teardown = fn;\n\treturn effect;\n}\n\n/**\n * Internal representation of `$effect(...)`\n * @param {() => void | (() => void)} fn\n */\nexport function user_effect(fn) {\n\tvalidate_effect('$effect');\n\n\tif (DEV) {\n\t\tdefine_property(fn, 'name', {\n\t\t\tvalue: '$effect'\n\t\t});\n\t}\n\n\t// Non-nested `$effect(...)` in a component should be deferred\n\t// until the component is mounted\n\tvar flags = /** @type {Effect} */ (active_effect).f;\n\tvar defer = !active_reaction && (flags & BRANCH_EFFECT) !== 0 && (flags & REACTION_RAN) === 0;\n\n\tif (defer) {\n\t\t// Top-level `$effect(...)` in an unmounted component — defer until mount\n\t\tvar context = /** @type {ComponentContext} */ (component_context);\n\t\t(context.e ??= []).push(fn);\n\t} else {\n\t\t// Everything else — create immediately\n\t\treturn create_user_effect(fn);\n\t}\n}\n\n/**\n * @param {() => void | (() => void)} fn\n */\nexport function create_user_effect(fn) {\n\treturn create_effect(EFFECT | USER_EFFECT, fn);\n}\n\n/**\n * Internal representation of `$effect.pre(...)`\n * @param {() => void | (() => void)} fn\n * @returns {Effect}\n */\nexport function user_pre_effect(fn) {\n\tvalidate_effect('$effect.pre');\n\tif (DEV) {\n\t\tdefine_property(fn, 'name', {\n\t\t\tvalue: '$effect.pre'\n\t\t});\n\t}\n\treturn create_effect(RENDER_EFFECT | USER_EFFECT, fn);\n}\n\n/** @param {() => void | (() => void)} fn */\nexport function eager_effect(fn) {\n\treturn create_effect(EAGER_EFFECT, fn);\n}\n\n/**\n * Internal representation of `$effect.root(...)`\n * @param {() => void | (() => void)} fn\n * @returns {() => void}\n */\nexport function effect_root(fn) {\n\tBatch.ensure();\n\tconst effect = create_effect(ROOT_EFFECT | EFFECT_PRESERVED, fn);\n\n\treturn () => {\n\t\tdestroy_effect(effect);\n\t};\n}\n\n/**\n * An effect root whose children can transition out\n * @param {() => void} fn\n * @returns {(options?: { outro?: boolean }) => Promise}\n */\nexport function component_root(fn) {\n\tBatch.ensure();\n\tconst effect = create_effect(ROOT_EFFECT | EFFECT_PRESERVED, fn);\n\n\treturn (options = {}) => {\n\t\treturn new Promise((fulfil) => {\n\t\t\tif (options.outro) {\n\t\t\t\tpause_effect(effect, () => {\n\t\t\t\t\tdestroy_effect(effect);\n\t\t\t\t\tfulfil(undefined);\n\t\t\t\t});\n\t\t\t} else 
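// Editor's note — what the `defer` branch in `user_effect` above means in
// userland (Svelte component sketch, not part of this file): a top-level
// `$effect` in a component body is collected onto the component context and
// only created once the component has mounted, whereas an effect created
// inside another reaction is created immediately.
//
//   <script>
//     $effect(() => {
//       // runs after the component is mounted
//     });
//   </script>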
{\n\t\t\t\tdestroy_effect(effect);\n\t\t\t\tfulfil(undefined);\n\t\t\t}\n\t\t});\n\t};\n}\n\n/**\n * @param {() => void | (() => void)} fn\n * @returns {Effect}\n */\nexport function effect(fn) {\n\treturn create_effect(EFFECT, fn);\n}\n\n/**\n * Internal representation of `$: ..`\n * @param {() => any} deps\n * @param {() => void | (() => void)} fn\n */\nexport function legacy_pre_effect(deps, fn) {\n\tvar context = /** @type {ComponentContextLegacy} */ (component_context);\n\n\t/** @type {{ effect: null | Effect, ran: boolean, deps: () => any }} */\n\tvar token = { effect: null, ran: false, deps };\n\n\tcontext.l.$.push(token);\n\n\ttoken.effect = render_effect(() => {\n\t\tdeps();\n\n\t\t// If this legacy pre effect has already run before the end of the reset, then\n\t\t// bail out to emulate the same behavior.\n\t\tif (token.ran) return;\n\n\t\ttoken.ran = true;\n\n\t\tvar effect = /** @type {Effect} */ (active_effect);\n\n\t\t// here, we lie: by setting `active_effect` to be the parent branch, any writes\n\t\t// that happen inside `fn` will _not_ cause an unnecessary reschedule, because\n\t\t// the affected effects will be children of `active_effect`. this is safe\n\t\t// because these effects are known to run in the correct order\n\t\ttry {\n\t\t\tset_active_effect(effect.parent);\n\t\t\tuntrack(fn);\n\t\t} finally {\n\t\t\tset_active_effect(effect);\n\t\t}\n\t});\n}\n\nexport function legacy_pre_effect_reset() {\n\tvar context = /** @type {ComponentContextLegacy} */ (component_context);\n\n\trender_effect(() => {\n\t\t// Run dirty `$:` statements\n\t\tfor (var token of context.l.$) {\n\t\t\ttoken.deps();\n\n\t\t\tvar effect = token.effect;\n\n\t\t\t// If the effect is CLEAN, then make it MAYBE_DIRTY. This ensures we traverse through\n\t\t\t// the effects dependencies and correctly ensure each dependency is up-to-date.\n\t\t\tif ((effect.f & CLEAN) !== 0 && effect.deps !== null) {\n\t\t\t\tset_signal_status(effect, MAYBE_DIRTY);\n\t\t\t}\n\n\t\t\tif (is_dirty(effect)) {\n\t\t\t\tupdate_effect(effect);\n\t\t\t}\n\n\t\t\ttoken.ran = false;\n\t\t}\n\t});\n}\n\n/**\n * @param {() => void | (() => void)} fn\n * @returns {Effect}\n */\nexport function async_effect(fn) {\n\treturn create_effect(ASYNC | EFFECT_PRESERVED, fn);\n}\n\n/**\n * @param {() => void | (() => void)} fn\n * @returns {Effect}\n */\nexport function render_effect(fn, flags = 0) {\n\treturn create_effect(RENDER_EFFECT | flags, fn);\n}\n\n/**\n * @param {(...expressions: any) => void | (() => void)} fn\n * @param {Array<() => any>} sync\n * @param {Array<() => Promise>} async\n * @param {Blocker[]} blockers\n */\nexport function template_effect(fn, sync = [], async = [], blockers = []) {\n\tflatten(blockers, sync, async, (values) => {\n\t\tcreate_effect(RENDER_EFFECT, () => fn(...values.map(get)));\n\t});\n}\n\n/**\n * Like `template_effect`, but with an effect which is deferred until the batch commits\n * @param {(...expressions: any) => void | (() => void)} fn\n * @param {Array<() => any>} sync\n * @param {Array<() => Promise>} async\n * @param {Blocker[]} blockers\n */\nexport function deferred_template_effect(fn, sync = [], async = [], blockers = []) {\n\tif (async.length > 0 || blockers.length > 0) {\n\t\tvar decrement_pending = increment_pending();\n\t}\n\n\tflatten(blockers, sync, async, (values) => {\n\t\tcreate_effect(EFFECT, () => fn(...values.map(get)));\n\n\t\tif (decrement_pending) {\n\t\t\tdecrement_pending();\n\t\t}\n\t});\n}\n\n/**\n * @param {(() => void)} fn\n * @param {number} flags\n */\nexport function 
block(fn, flags = 0) {\n\tvar effect = create_effect(BLOCK_EFFECT | flags, fn);\n\tif (DEV) {\n\t\teffect.dev_stack = dev_stack;\n\t}\n\treturn effect;\n}\n\n/**\n * @param {(() => void)} fn\n * @param {number} flags\n */\nexport function managed(fn, flags = 0) {\n\tvar effect = create_effect(MANAGED_EFFECT | flags, fn);\n\tif (DEV) {\n\t\teffect.dev_stack = dev_stack;\n\t}\n\treturn effect;\n}\n\n/**\n * @param {(() => void)} fn\n */\nexport function branch(fn) {\n\treturn create_effect(BRANCH_EFFECT | EFFECT_PRESERVED, fn);\n}\n\n/**\n * @param {Effect} effect\n */\nexport function execute_effect_teardown(effect) {\n\tvar teardown = effect.teardown;\n\tif (teardown !== null) {\n\t\tconst previously_destroying_effect = is_destroying_effect;\n\t\tconst previous_reaction = active_reaction;\n\t\tset_is_destroying_effect(true);\n\t\tset_active_reaction(null);\n\t\ttry {\n\t\t\tteardown.call(null);\n\t\t} finally {\n\t\t\tset_is_destroying_effect(previously_destroying_effect);\n\t\t\tset_active_reaction(previous_reaction);\n\t\t}\n\t}\n}\n\n/**\n * @param {Effect} signal\n * @param {boolean} remove_dom\n * @returns {void}\n */\nexport function destroy_effect_children(signal, remove_dom = false) {\n\tvar effect = signal.first;\n\tsignal.first = signal.last = null;\n\n\twhile (effect !== null) {\n\t\tconst controller = effect.ac;\n\n\t\tif (controller !== null) {\n\t\t\twithout_reactive_context(() => {\n\t\t\t\tcontroller.abort(STALE_REACTION);\n\t\t\t});\n\t\t}\n\n\t\tvar next = effect.next;\n\n\t\tif ((effect.f & ROOT_EFFECT) !== 0) {\n\t\t\t// this is now an independent root\n\t\t\teffect.parent = null;\n\t\t} else {\n\t\t\tdestroy_effect(effect, remove_dom);\n\t\t}\n\n\t\teffect = next;\n\t}\n}\n\n/**\n * @param {Effect} signal\n * @returns {void}\n */\nexport function destroy_block_effect_children(signal) {\n\tvar effect = signal.first;\n\n\twhile (effect !== null) {\n\t\tvar next = effect.next;\n\t\tif ((effect.f & BRANCH_EFFECT) === 0) {\n\t\t\tdestroy_effect(effect);\n\t\t}\n\t\teffect = next;\n\t}\n}\n\n/**\n * @param {Effect} effect\n * @param {boolean} [remove_dom]\n * @returns {void}\n */\nexport function destroy_effect(effect, remove_dom = true) {\n\tvar removed = false;\n\n\tif (\n\t\t(remove_dom || (effect.f & HEAD_EFFECT) !== 0) &&\n\t\teffect.nodes !== null &&\n\t\teffect.nodes.end !== null\n\t) {\n\t\tremove_effect_dom(effect.nodes.start, /** @type {TemplateNode} */ (effect.nodes.end));\n\t\tremoved = true;\n\t}\n\n\tset_signal_status(effect, DESTROYING);\n\tdestroy_effect_children(effect, remove_dom && !removed);\n\tremove_reactions(effect, 0);\n\n\tvar transitions = effect.nodes && effect.nodes.t;\n\n\tif (transitions !== null) {\n\t\tfor (const transition of transitions) {\n\t\t\ttransition.stop();\n\t\t}\n\t}\n\n\texecute_effect_teardown(effect);\n\n\teffect.f ^= DESTROYING;\n\teffect.f |= DESTROYED;\n\n\tvar parent = effect.parent;\n\n\t// If the parent doesn't have any children, then skip this work altogether\n\tif (parent !== null && parent.first !== null) {\n\t\tunlink_effect(effect);\n\t}\n\n\tif (DEV) {\n\t\teffect.component_function = null;\n\t}\n\n\t// `first` and `child` are nulled out in destroy_effect_children\n\t// we don't null out `parent` so that error propagation can work correctly\n\teffect.next =\n\t\teffect.prev =\n\t\teffect.teardown =\n\t\teffect.ctx =\n\t\teffect.deps =\n\t\teffect.fn =\n\t\teffect.nodes =\n\t\teffect.ac =\n\t\teffect.b =\n\t\t\tnull;\n}\n\n/**\n *\n * @param {TemplateNode | null} node\n * @param {TemplateNode} end\n */\nexport 
function remove_effect_dom(node, end) {\n\twhile (node !== null) {\n\t\t/** @type {TemplateNode | null} */\n\t\tvar next = node === end ? null : get_next_sibling(node);\n\n\t\tnode.remove();\n\t\tnode = next;\n\t}\n}\n\n/**\n * Detach an effect from the effect tree, freeing up memory and\n * reducing the amount of work that happens on subsequent traversals\n * @param {Effect} effect\n */\nexport function unlink_effect(effect) {\n\tvar parent = effect.parent;\n\tvar prev = effect.prev;\n\tvar next = effect.next;\n\n\tif (prev !== null) prev.next = next;\n\tif (next !== null) next.prev = prev;\n\n\tif (parent !== null) {\n\t\tif (parent.first === effect) parent.first = next;\n\t\tif (parent.last === effect) parent.last = prev;\n\t}\n}\n\n/**\n * When a block effect is removed, we don't immediately destroy it or yank it\n * out of the DOM, because it might have transitions. Instead, we 'pause' it.\n * It stays around (in memory, and in the DOM) until outro transitions have\n * completed, and if the state change is reversed then we _resume_ it.\n * A paused effect does not update, and the DOM subtree becomes inert.\n * @param {Effect} effect\n * @param {() => void} [callback]\n * @param {boolean} [destroy]\n */\nexport function pause_effect(effect, callback, destroy = true) {\n\t/** @type {TransitionManager[]} */\n\tvar transitions = [];\n\n\tpause_children(effect, transitions, true);\n\n\tvar fn = () => {\n\t\tif (destroy) destroy_effect(effect);\n\t\tif (callback) callback();\n\t};\n\n\tvar remaining = transitions.length;\n\tif (remaining > 0) {\n\t\tvar check = () => --remaining || fn();\n\t\tfor (var transition of transitions) {\n\t\t\ttransition.out(check);\n\t\t}\n\t} else {\n\t\tfn();\n\t}\n}\n\n/**\n * @param {Effect} effect\n * @param {TransitionManager[]} transitions\n * @param {boolean} local\n */\nfunction pause_children(effect, transitions, local) {\n\tif ((effect.f & INERT) !== 0) return;\n\teffect.f ^= INERT;\n\n\tvar t = effect.nodes && effect.nodes.t;\n\n\tif (t !== null) {\n\t\tfor (const transition of t) {\n\t\t\tif (transition.is_global || local) {\n\t\t\t\ttransitions.push(transition);\n\t\t\t}\n\t\t}\n\t}\n\n\tvar child = effect.first;\n\n\twhile (child !== null) {\n\t\tvar sibling = child.next;\n\n\t\t// If this child is a root effect, then it will become an independent root when its parent\n\t\t// is destroyed, it should therefore not become inert nor partake in transitions.\n\t\tif ((child.f & ROOT_EFFECT) === 0) {\n\t\t\tvar transparent =\n\t\t\t\t(child.f & EFFECT_TRANSPARENT) !== 0 ||\n\t\t\t\t// If this is a branch effect without a block effect parent,\n\t\t\t\t// it means the parent block effect was pruned. In that case,\n\t\t\t\t// transparency information was transferred to the branch effect.\n\t\t\t\t((child.f & BRANCH_EFFECT) !== 0 && (effect.f & BLOCK_EFFECT) !== 0);\n\t\t\t// TODO we don't need to call pause_children recursively with a linked list in place\n\t\t\t// it's slightly more involved though as we have to account for `transparent` changing\n\t\t\t// through the tree.\n\t\t\tpause_children(child, transitions, transparent ? local : false);\n\t\t}\n\n\t\tchild = sibling;\n\t}\n}\n\n/**\n * The opposite of `pause_effect`. 
We call this if (for example)\n * `x` becomes falsy then truthy: `{#if x}...{/if}`\n * @param {Effect} effect\n */\nexport function resume_effect(effect) {\n\tresume_children(effect, true);\n}\n\n/**\n * @param {Effect} effect\n * @param {boolean} local\n */\nfunction resume_children(effect, local) {\n\tif ((effect.f & INERT) === 0) return;\n\teffect.f ^= INERT;\n\n\t// If a dependency of this effect changed while it was paused,\n\t// schedule the effect to update. We don't use `is_dirty`\n\t// here because we don't want to eagerly recompute a derived like\n\t// `{#if foo}{foo.bar()}{/if}` if `foo` is now `undefined`\n\tif ((effect.f & CLEAN) === 0) {\n\t\tset_signal_status(effect, DIRTY);\n\t\tBatch.ensure().schedule(effect); // Assumption: This happens during the commit phase of the batch, causing another flush, but it's safe\n\t}\n\n\tvar child = effect.first;\n\n\twhile (child !== null) {\n\t\tvar sibling = child.next;\n\t\tvar transparent = (child.f & EFFECT_TRANSPARENT) !== 0 || (child.f & BRANCH_EFFECT) !== 0;\n\t\t// TODO we don't need to call resume_children recursively with a linked list in place\n\t\t// it's slightly more involved though as we have to account for `transparent` changing\n\t\t// through the tree.\n\t\tresume_children(child, transparent ? local : false);\n\t\tchild = sibling;\n\t}\n\n\tvar t = effect.nodes && effect.nodes.t;\n\n\tif (t !== null) {\n\t\tfor (const transition of t) {\n\t\t\tif (transition.is_global || local) {\n\t\t\t\ttransition.in();\n\t\t\t}\n\t\t}\n\t}\n}\n\nexport function aborted(effect = /** @type {Effect} */ (active_effect)) {\n\treturn (effect.f & DESTROYED) !== 0;\n}\n\n/**\n * @param {Effect} effect\n * @param {DocumentFragment} fragment\n */\nexport function move_effect(effect, fragment) {\n\tif (!effect.nodes) return;\n\n\t/** @type {TemplateNode | null} */\n\tvar node = effect.nodes.start;\n\tvar end = effect.nodes.end;\n\n\twhile (node !== null) {\n\t\t/** @type {TemplateNode | null} */\n\t\tvar next = node === end ? 
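/* [editor's note] Hedged illustration, not part of the original module: pause_effect/resume_effect (above) are what make outro transitions reversible. In template terms, with a standard fade transition:\n\n\t// {#if visible}<div transition:fade>...</div>{/if}\n\t//\n\t// visible: true -> false pauses the branch effect: the <div> stays in the\n\t// DOM, inert, until the outro finishes, and only then is destroy_effect called.\n\t// If visible flips back to true mid-outro, resume_effect revives the same\n\t// effect and DOM instead of recreating the branch.\n*/ 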
null : get_next_sibling(node);\n\n\t\tfragment.append(node);\n\t\tnode = next;\n\t}\n}\n","/** @import { Derived, Effect, Reaction, Source, Value } from '#client' */\nimport { DEV } from 'esm-env';\nimport { get_descriptors, get_prototype_of, includes, index_of } from '../shared/utils.js';\nimport {\n\tdestroy_block_effect_children,\n\tdestroy_effect_children,\n\teffect_tracking,\n\texecute_effect_teardown\n} from './reactivity/effects.js';\nimport {\n\tDIRTY,\n\tMAYBE_DIRTY,\n\tCLEAN,\n\tDERIVED,\n\tDESTROYED,\n\tBRANCH_EFFECT,\n\tSTATE_SYMBOL,\n\tBLOCK_EFFECT,\n\tROOT_EFFECT,\n\tCONNECTED,\n\tREACTION_IS_UPDATING,\n\tSTALE_REACTION,\n\tERROR_VALUE,\n\tWAS_MARKED,\n\tMANAGED_EFFECT,\n\tREACTION_RAN\n} from './constants.js';\nimport { old_values } from './reactivity/sources.js';\nimport {\n\treactivity_loss_tracker,\n\texecute_derived,\n\tfreeze_derived_effects,\n\trecent_async_deriveds,\n\tunfreeze_derived_effects,\n\tupdate_derived\n} from './reactivity/deriveds.js';\nimport { async_mode_flag, tracing_mode_flag } from '../flags/index.js';\nimport { tracing_expressions } from './dev/tracing.js';\nimport { get_error } from '../shared/dev.js';\nimport {\n\tcomponent_context,\n\tdev_current_component_function,\n\tdev_stack,\n\tis_runes,\n\tset_component_context,\n\tset_dev_current_component_function,\n\tset_dev_stack\n} from './context.js';\nimport {\n\tBatch,\n\tbatch_values,\n\tcurrent_batch,\n\tflushSync,\n\tschedule_effect\n} from './reactivity/batch.js';\nimport { handle_error } from './error-handling.js';\nimport { UNINITIALIZED } from '../../constants.js';\nimport { captured_signals } from './legacy.js';\nimport { without_reactive_context } from './dom/elements/bindings/shared.js';\nimport { set_signal_status, update_derived_status } from './reactivity/status.js';\nimport * as w from './warnings.js';\n\nlet is_updating_effect = false;\n\nexport let is_destroying_effect = false;\n\n/** @param {boolean} value */\nexport function set_is_destroying_effect(value) {\n\tis_destroying_effect = value;\n}\n\n/** @type {null | Reaction} */\nexport let active_reaction = null;\n\nexport let untracking = false;\n\n/** @param {null | Reaction} reaction */\nexport function set_active_reaction(reaction) {\n\tactive_reaction = reaction;\n}\n\n/** @type {null | Effect} */\nexport let active_effect = null;\n\n/** @param {null | Effect} effect */\nexport function set_active_effect(effect) {\n\tactive_effect = effect;\n}\n\n/**\n * When sources are created within a reaction, reading and writing\n * them within that reaction should not cause a re-run\n * @type {null | Source[]}\n */\nexport let current_sources = null;\n\n/** @param {Value} value */\nexport function push_reaction_value(value) {\n\tif (active_reaction !== null && (!async_mode_flag || (active_reaction.f & DERIVED) !== 0)) {\n\t\tif (current_sources === null) {\n\t\t\tcurrent_sources = [value];\n\t\t} else {\n\t\t\tcurrent_sources.push(value);\n\t\t}\n\t}\n}\n\n/**\n * The dependencies of the reaction that is currently being executed. 
In many cases,\n * the dependencies are unchanged between runs, and so this will be `null` unless\n * and until a new dependency is accessed — we track this via `skipped_deps`\n * @type {null | Value[]}\n */\nexport let new_deps = null;\n\nexport let skipped_deps = 0;\n\n/**\n * Tracks writes that the effect it's executed in doesn't listen to yet,\n * so that the dependency can be added to the effect later on if it then reads it\n * @type {null | Source[]}\n */\nexport let untracked_writes = null;\n\n/** @param {null | Source[]} value */\nexport function set_untracked_writes(value) {\n\tuntracked_writes = value;\n}\n\n/**\n * @type {number} Used by sources and deriveds for handling updates.\n * Version starts from 1 so that unowned deriveds differentiate between a created effect and a run one for tracing\n **/\nexport let write_version = 1;\n\n/** @type {number} Used to version each read of a source or derived to avoid duplicating dependencies inside a reaction */\nlet read_version = 0;\n\nexport let update_version = read_version;\n\n/** @param {number} value */\nexport function set_update_version(value) {\n\tupdate_version = value;\n}\n\nexport function increment_write_version() {\n\treturn ++write_version;\n}\n\n/**\n * Determines whether a derived or effect is dirty.\n * If it is MAYBE_DIRTY, will set the status to CLEAN\n * @param {Reaction} reaction\n * @returns {boolean}\n */\nexport function is_dirty(reaction) {\n\tvar flags = reaction.f;\n\n\tif ((flags & DIRTY) !== 0) {\n\t\treturn true;\n\t}\n\n\tif (flags & DERIVED) {\n\t\treaction.f &= ~WAS_MARKED;\n\t}\n\n\tif ((flags & MAYBE_DIRTY) !== 0) {\n\t\tvar dependencies = /** @type {Value[]} */ (reaction.deps);\n\t\tvar length = dependencies.length;\n\n\t\tfor (var i = 0; i < length; i++) {\n\t\t\tvar dependency = dependencies[i];\n\n\t\t\tif (is_dirty(/** @type {Derived} */ (dependency))) {\n\t\t\t\tupdate_derived(/** @type {Derived} */ (dependency));\n\t\t\t}\n\n\t\t\tif (dependency.wv > reaction.wv) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\n\t\tif (\n\t\t\t(flags & CONNECTED) !== 0 &&\n\t\t\t// During time traveling we don't want to reset the status so that\n\t\t\t// traversal of the graph in the other batches still happens\n\t\t\tbatch_values === null\n\t\t) {\n\t\t\tset_signal_status(reaction, CLEAN);\n\t\t}\n\t}\n\n\treturn false;\n}\n\n/**\n * @param {Value} signal\n * @param {Effect} effect\n * @param {boolean} [root]\n */\nfunction schedule_possible_effect_self_invalidation(signal, effect, root = true) {\n\tvar reactions = signal.reactions;\n\tif (reactions === null) return;\n\n\tif (!async_mode_flag && current_sources !== null && includes.call(current_sources, signal)) {\n\t\treturn;\n\t}\n\n\tfor (var i = 0; i < reactions.length; i++) {\n\t\tvar reaction = reactions[i];\n\n\t\tif ((reaction.f & DERIVED) !== 0) {\n\t\t\tschedule_possible_effect_self_invalidation(/** @type {Derived} */ (reaction), effect, false);\n\t\t} else if (effect === reaction) {\n\t\t\tif (root) {\n\t\t\t\tset_signal_status(reaction, DIRTY);\n\t\t\t} else if ((reaction.f & CLEAN) !== 0) {\n\t\t\t\tset_signal_status(reaction, MAYBE_DIRTY);\n\t\t\t}\n\t\t\tschedule_effect(/** @type {Effect} */ (reaction));\n\t\t}\n\t}\n}\n\n/** @param {Reaction} reaction */\nexport function update_reaction(reaction) {\n\tvar previous_deps = new_deps;\n\tvar previous_skipped_deps = skipped_deps;\n\tvar previous_untracked_writes = untracked_writes;\n\tvar previous_reaction = active_reaction;\n\tvar previous_sources = current_sources;\n\tvar previous_component_context = 
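/* [editor's note] Hedged sketch, not part of the original module: the rv/read_version pair documented above is a dedup guard — a signal read more than once during a single reaction run is only recorded as a dependency once, because its rv already equals the current read_version on the second read. E.g.:\n\n\t// let count = $state(0);\n\t// const quadrupled = $derived(count + count + count + count);\n\t// `count` ends up exactly once in quadrupled's deps, not four times.\n*/ 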
component_context;\n\tvar previous_untracking = untracking;\n\tvar previous_update_version = update_version;\n\n\tvar flags = reaction.f;\n\n\tnew_deps = /** @type {null | Value[]} */ (null);\n\tskipped_deps = 0;\n\tuntracked_writes = null;\n\tactive_reaction = (flags & (BRANCH_EFFECT | ROOT_EFFECT)) === 0 ? reaction : null;\n\n\tcurrent_sources = null;\n\tset_component_context(reaction.ctx);\n\tuntracking = false;\n\tupdate_version = ++read_version;\n\n\tif (reaction.ac !== null) {\n\t\twithout_reactive_context(() => {\n\t\t\t/** @type {AbortController} */ (reaction.ac).abort(STALE_REACTION);\n\t\t});\n\n\t\treaction.ac = null;\n\t}\n\n\ttry {\n\t\treaction.f |= REACTION_IS_UPDATING;\n\t\tvar fn = /** @type {Function} */ (reaction.fn);\n\t\tvar result = fn();\n\t\treaction.f |= REACTION_RAN;\n\t\tvar deps = reaction.deps;\n\n\t\t// Don't remove reactions during fork;\n\t\t// they must remain for when fork is discarded\n\t\tvar is_fork = current_batch?.is_fork;\n\n\t\tif (new_deps !== null) {\n\t\t\tvar i;\n\n\t\t\tif (!is_fork) {\n\t\t\t\tremove_reactions(reaction, skipped_deps);\n\t\t\t}\n\n\t\t\tif (deps !== null && skipped_deps > 0) {\n\t\t\t\tdeps.length = skipped_deps + new_deps.length;\n\t\t\t\tfor (i = 0; i < new_deps.length; i++) {\n\t\t\t\t\tdeps[skipped_deps + i] = new_deps[i];\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treaction.deps = deps = new_deps;\n\t\t\t}\n\n\t\t\tif (effect_tracking() && (reaction.f & CONNECTED) !== 0) {\n\t\t\t\tfor (i = skipped_deps; i < deps.length; i++) {\n\t\t\t\t\t(deps[i].reactions ??= []).push(reaction);\n\t\t\t\t}\n\t\t\t}\n\t\t} else if (!is_fork && deps !== null && skipped_deps < deps.length) {\n\t\t\tremove_reactions(reaction, skipped_deps);\n\t\t\tdeps.length = skipped_deps;\n\t\t}\n\n\t\t// If we're inside an effect and we have untracked writes, then we need to\n\t\t// ensure that if any of those untracked writes result in re-invalidation\n\t\t// of the current effect, then that happens accordingly\n\t\tif (\n\t\t\tis_runes() &&\n\t\t\tuntracked_writes !== null &&\n\t\t\t!untracking &&\n\t\t\tdeps !== null &&\n\t\t\t(reaction.f & (DERIVED | MAYBE_DIRTY | DIRTY)) === 0\n\t\t) {\n\t\t\tfor (i = 0; i < /** @type {Source[]} */ (untracked_writes).length; i++) {\n\t\t\t\tschedule_possible_effect_self_invalidation(\n\t\t\t\t\tuntracked_writes[i],\n\t\t\t\t\t/** @type {Effect} */ (reaction)\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\t// If we are returning to a previous reaction then\n\t\t// we need to increment the read version to ensure that\n\t\t// any dependencies in this reaction aren't marked with\n\t\t// the same version\n\t\tif (previous_reaction !== null && previous_reaction !== reaction) {\n\t\t\tread_version++;\n\n\t\t\t// update the `rv` of the previous reaction's deps — both existing and new —\n\t\t\t// so that they are not added again\n\t\t\tif (previous_reaction.deps !== null) {\n\t\t\t\tfor (let i = 0; i < previous_skipped_deps; i += 1) {\n\t\t\t\t\tprevious_reaction.deps[i].rv = read_version;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (previous_deps !== null) {\n\t\t\t\tfor (const dep of previous_deps) {\n\t\t\t\t\tdep.rv = read_version;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (untracked_writes !== null) {\n\t\t\t\tif (previous_untracked_writes === null) {\n\t\t\t\t\tprevious_untracked_writes = untracked_writes;\n\t\t\t\t} else {\n\t\t\t\t\tprevious_untracked_writes.push(.../** @type {Source[]} */ (untracked_writes));\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif ((reaction.f & ERROR_VALUE) !== 0) {\n\t\t\treaction.f ^= ERROR_VALUE;\n\t\t}\n\n\t\treturn result;\n\t} catch 
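/* [editor's note] Hedged sketch, not part of the original module: the skipped_deps bookkeeping above is an allocation optimisation — while a re-run reads the same signals in the same order as the previous run, the dependency tracker just advances skipped_deps instead of building a new_deps array. A stable effect such as\n\n\t// $effect(() => console.log(a, b)); // assuming `a` and `b` are $state sources in scope\n\n\ttherefore re-runs without reallocating its deps array at all.\n*/ 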
(error) {\n\t\treturn handle_error(error);\n\t} finally {\n\t\treaction.f ^= REACTION_IS_UPDATING;\n\t\tnew_deps = previous_deps;\n\t\tskipped_deps = previous_skipped_deps;\n\t\tuntracked_writes = previous_untracked_writes;\n\t\tactive_reaction = previous_reaction;\n\t\tcurrent_sources = previous_sources;\n\t\tset_component_context(previous_component_context);\n\t\tuntracking = previous_untracking;\n\t\tupdate_version = previous_update_version;\n\t}\n}\n\n/**\n * @template V\n * @param {Reaction} signal\n * @param {Value} dependency\n * @returns {void}\n */\nfunction remove_reaction(signal, dependency) {\n\tlet reactions = dependency.reactions;\n\tif (reactions !== null) {\n\t\tvar index = index_of.call(reactions, signal);\n\t\tif (index !== -1) {\n\t\t\tvar new_length = reactions.length - 1;\n\t\t\tif (new_length === 0) {\n\t\t\t\treactions = dependency.reactions = null;\n\t\t\t} else {\n\t\t\t\t// Swap with last element and then remove.\n\t\t\t\treactions[index] = reactions[new_length];\n\t\t\t\treactions.pop();\n\t\t\t}\n\t\t}\n\t}\n\n\t// If the derived has no reactions, then we can disconnect it from the graph,\n\t// allowing it to either reconnect in the future, or be GC'd by the VM.\n\tif (\n\t\treactions === null &&\n\t\t(dependency.f & DERIVED) !== 0 &&\n\t\t// Destroying a child effect while updating a parent effect can cause a dependency to appear\n\t\t// to be unused, when in fact it is used by the currently-updating parent. Checking `new_deps`\n\t\t// allows us to skip the expensive work of disconnecting and immediately reconnecting it\n\t\t(new_deps === null || !includes.call(new_deps, dependency))\n\t) {\n\t\tvar derived = /** @type {Derived} */ (dependency);\n\n\t\t// If we are working with a derived that is owned by an effect, then mark it as being\n\t\t// disconnected and remove the mark flag, as it cannot be reliably removed otherwise\n\t\tif ((derived.f & CONNECTED) !== 0) {\n\t\t\tderived.f ^= CONNECTED;\n\t\t\tderived.f &= ~WAS_MARKED;\n\t\t}\n\n\t\t// In a fork it's possible that a derived is executed and gets reactions, then commits, but is\n\t\t// never re-executed. This is possible when the derived is only executed once in the context\n\t\t// of a new branch which happens before fork.commit() runs. 
In this case, the derived still has\n\t\t// UNINITIALIZED as its value, and then when it's losing its reactions we need to ensure it stays\n\t\t// DIRTY so it is re-executed once someone wants its value again.\n\t\tif (derived.v !== UNINITIALIZED) {\n\t\t\tupdate_derived_status(derived);\n\t\t}\n\n\t\t// freeze any effects inside this derived\n\t\tfreeze_derived_effects(derived);\n\n\t\t// Disconnect any reactions owned by this reaction\n\t\tremove_reactions(derived, 0);\n\t}\n}\n\n/**\n * @param {Reaction} signal\n * @param {number} start_index\n * @returns {void}\n */\nexport function remove_reactions(signal, start_index) {\n\tvar dependencies = signal.deps;\n\tif (dependencies === null) return;\n\n\tfor (var i = start_index; i < dependencies.length; i++) {\n\t\tremove_reaction(signal, dependencies[i]);\n\t}\n}\n\n/**\n * @param {Effect} effect\n * @returns {void}\n */\nexport function update_effect(effect) {\n\tvar flags = effect.f;\n\n\tif ((flags & DESTROYED) !== 0) {\n\t\treturn;\n\t}\n\n\tset_signal_status(effect, CLEAN);\n\n\tvar previous_effect = active_effect;\n\tvar was_updating_effect = is_updating_effect;\n\n\tactive_effect = effect;\n\tis_updating_effect = true;\n\n\tif (DEV) {\n\t\tvar previous_component_fn = dev_current_component_function;\n\t\tset_dev_current_component_function(effect.component_function);\n\t\tvar previous_stack = /** @type {any} */ (dev_stack);\n\t\t// only block effects have a dev stack, keep the current one otherwise\n\t\tset_dev_stack(effect.dev_stack ?? dev_stack);\n\t}\n\n\ttry {\n\t\tif ((flags & (BLOCK_EFFECT | MANAGED_EFFECT)) !== 0) {\n\t\t\tdestroy_block_effect_children(effect);\n\t\t} else {\n\t\t\tdestroy_effect_children(effect);\n\t\t}\n\n\t\texecute_effect_teardown(effect);\n\t\tvar teardown = update_reaction(effect);\n\t\teffect.teardown = typeof teardown === 'function' ? teardown : null;\n\t\teffect.wv = write_version;\n\n\t\t// In DEV, increment versions of any sources that were written to during the effect,\n\t\t// so that they are correctly marked as dirty when the effect re-runs\n\t\tif (DEV && tracing_mode_flag && (effect.f & DIRTY) !== 0 && effect.deps !== null) {\n\t\t\tfor (var dep of effect.deps) {\n\t\t\t\tif (dep.set_during_effect) {\n\t\t\t\t\tdep.wv = increment_write_version();\n\t\t\t\t\tdep.set_during_effect = false;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} finally {\n\t\tis_updating_effect = was_updating_effect;\n\t\tactive_effect = previous_effect;\n\n\t\tif (DEV) {\n\t\t\tset_dev_current_component_function(previous_component_fn);\n\t\t\tset_dev_stack(previous_stack);\n\t\t}\n\t}\n}\n\n/**\n * Returns a promise that resolves once any pending state changes have been applied.\n * @returns {Promise}\n */\nexport async function tick() {\n\tif (async_mode_flag) {\n\t\treturn new Promise((f) => {\n\t\t\t// Race them against each other - in almost all cases requestAnimationFrame will fire first,\n\t\t\t// but e.g. 
in case the window is not focused or a view transition happens, requestAnimationFrame\n\t\t\t// will be delayed and setTimeout helps us resolve fast enough in that case\n\t\t\trequestAnimationFrame(() => f());\n\t\t\tsetTimeout(() => f());\n\t\t});\n\t}\n\n\tawait Promise.resolve();\n\n\t// By calling flushSync we guarantee that any pending state changes are applied after one tick.\n\t// TODO look into whether we can make flushing subsequent updates synchronously in the future.\n\tflushSync();\n}\n\n/**\n * Returns a promise that resolves once any state changes, and asynchronous work resulting from them,\n * have resolved and the DOM has been updated\n * @returns {Promise}\n * @since 5.36\n */\nexport function settled() {\n\treturn Batch.ensure().settled();\n}\n\n/**\n * @template V\n * @param {Value} signal\n * @returns {V}\n */\nexport function get(signal) {\n\tvar flags = signal.f;\n\tvar is_derived = (flags & DERIVED) !== 0;\n\n\tcaptured_signals?.add(signal);\n\n\t// Register the dependency on the current reaction signal.\n\tif (active_reaction !== null && !untracking) {\n\t\t// if we're in a derived that is being read inside an _async_ derived,\n\t\t// it's possible that the effect was already destroyed. In this case,\n\t\t// we don't add the dependency, because that would create a memory leak\n\t\tvar destroyed = active_effect !== null && (active_effect.f & DESTROYED) !== 0;\n\n\t\tif (!destroyed && (current_sources === null || !includes.call(current_sources, signal))) {\n\t\t\tvar deps = active_reaction.deps;\n\n\t\t\tif ((active_reaction.f & REACTION_IS_UPDATING) !== 0) {\n\t\t\t\t// we're in the effect init/update cycle\n\t\t\t\tif (signal.rv < read_version) {\n\t\t\t\t\tsignal.rv = read_version;\n\n\t\t\t\t\t// If the signal is accessing the same dependencies in the same\n\t\t\t\t\t// order as it did last time, increment `skipped_deps`\n\t\t\t\t\t// rather than updating `new_deps`, which creates GC cost\n\t\t\t\t\tif (new_deps === null && deps !== null && deps[skipped_deps] === signal) {\n\t\t\t\t\t\tskipped_deps++;\n\t\t\t\t\t} else if (new_deps === null) {\n\t\t\t\t\t\tnew_deps = [signal];\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnew_deps.push(signal);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// we're adding a dependency outside the init/update cycle\n\t\t\t\t// (i.e. 
after an `await`)\n\t\t\t\t(active_reaction.deps ??= []).push(signal);\n\n\t\t\t\tvar reactions = signal.reactions;\n\n\t\t\t\tif (reactions === null) {\n\t\t\t\t\tsignal.reactions = [active_reaction];\n\t\t\t\t} else if (!includes.call(reactions, active_reaction)) {\n\t\t\t\t\treactions.push(active_reaction);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif (DEV) {\n\t\tif (\n\t\t\t!untracking &&\n\t\t\treactivity_loss_tracker &&\n\t\t\t!reactivity_loss_tracker.warned &&\n\t\t\t(reactivity_loss_tracker.effect.f & REACTION_IS_UPDATING) === 0 &&\n\t\t\t!reactivity_loss_tracker.effect_deps.has(signal)\n\t\t) {\n\t\t\treactivity_loss_tracker.warned = true;\n\n\t\t\tw.await_reactivity_loss(/** @type {string} */ (signal.label));\n\n\t\t\tvar trace = get_error('traced at');\n\t\t\t// eslint-disable-next-line no-console\n\t\t\tif (trace) console.warn(trace);\n\t\t}\n\n\t\trecent_async_deriveds.delete(signal);\n\n\t\tif (\n\t\t\ttracing_mode_flag &&\n\t\t\t!untracking &&\n\t\t\ttracing_expressions !== null &&\n\t\t\tactive_reaction !== null &&\n\t\t\ttracing_expressions.reaction === active_reaction\n\t\t) {\n\t\t\t// Used when mapping state between special blocks like `each`\n\t\t\tif (signal.trace) {\n\t\t\t\tsignal.trace();\n\t\t\t} else {\n\t\t\t\ttrace = get_error('traced at');\n\n\t\t\t\tif (trace) {\n\t\t\t\t\tvar entry = tracing_expressions.entries.get(signal);\n\n\t\t\t\t\tif (entry === undefined) {\n\t\t\t\t\t\tentry = { traces: [] };\n\t\t\t\t\t\ttracing_expressions.entries.set(signal, entry);\n\t\t\t\t\t}\n\n\t\t\t\t\tvar last = entry.traces[entry.traces.length - 1];\n\n\t\t\t\t\t// traces can be duplicated, e.g. by `snapshot` invoking\n\t\t\t\t\t// both `getOwnPropertyDescriptor` and `get` traps at once\n\t\t\t\t\tif (trace.stack !== last?.stack) {\n\t\t\t\t\t\tentry.traces.push(trace);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif (is_destroying_effect && old_values.has(signal)) {\n\t\treturn old_values.get(signal);\n\t}\n\n\tif (is_derived) {\n\t\tvar derived = /** @type {Derived} */ (signal);\n\n\t\tif (is_destroying_effect) {\n\t\t\tvar value = derived.v;\n\n\t\t\t// if the derived is dirty and has reactions, or depends on the values that just changed, re-execute\n\t\t\t// (a derived can be maybe_dirty due to the effect destroy removing its last reaction)\n\t\t\tif (\n\t\t\t\t((derived.f & CLEAN) === 0 && derived.reactions !== null) ||\n\t\t\t\tdepends_on_old_values(derived)\n\t\t\t) {\n\t\t\t\tvalue = execute_derived(derived);\n\t\t\t}\n\n\t\t\told_values.set(derived, value);\n\n\t\t\treturn value;\n\t\t}\n\n\t\t// connect disconnected deriveds if we are reading them inside an effect,\n\t\t// or inside another derived that is already connected\n\t\tvar should_connect =\n\t\t\t(derived.f & CONNECTED) === 0 &&\n\t\t\t!untracking &&\n\t\t\tactive_reaction !== null &&\n\t\t\t(is_updating_effect || (active_reaction.f & CONNECTED) !== 0);\n\n\t\tvar is_new = (derived.f & REACTION_RAN) === 0;\n\n\t\tif (is_dirty(derived)) {\n\t\t\tif (should_connect) {\n\t\t\t\t// set the flag before `update_derived`, so that the derived\n\t\t\t\t// is added as a reaction to its dependencies\n\t\t\t\tderived.f |= CONNECTED;\n\t\t\t}\n\n\t\t\tupdate_derived(derived);\n\t\t}\n\n\t\tif (should_connect && !is_new) {\n\t\t\tunfreeze_derived_effects(derived);\n\t\t\treconnect(derived);\n\t\t}\n\t}\n\n\tif (batch_values?.has(signal)) {\n\t\treturn batch_values.get(signal);\n\t}\n\n\tif ((signal.f & ERROR_VALUE) !== 0) {\n\t\tthrow signal.v;\n\t}\n\n\treturn signal.v;\n}\n\n/**\n * (Re)connect a disconnected 
derived, so that it is notified\n * of changes in `mark_reactions`\n * @param {Derived} derived\n */\nfunction reconnect(derived) {\n\tderived.f |= CONNECTED;\n\n\tif (derived.deps === null) return;\n\n\tfor (const dep of derived.deps) {\n\t\t(dep.reactions ??= []).push(derived);\n\n\t\tif ((dep.f & DERIVED) !== 0 && (dep.f & CONNECTED) === 0) {\n\t\t\tunfreeze_derived_effects(/** @type {Derived} */ (dep));\n\t\t\treconnect(/** @type {Derived} */ (dep));\n\t\t}\n\t}\n}\n\n/** @param {Derived} derived */\nfunction depends_on_old_values(derived) {\n\tif (derived.v === UNINITIALIZED) return true; // we don't know, so assume the worst\n\tif (derived.deps === null) return false;\n\n\tfor (const dep of derived.deps) {\n\t\tif (old_values.has(dep)) {\n\t\t\treturn true;\n\t\t}\n\n\t\tif ((dep.f & DERIVED) !== 0 && depends_on_old_values(/** @type {Derived} */ (dep))) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\n/**\n * Like `get`, but checks for `undefined`. Used for `var` declarations because they can be accessed before being declared\n * @template V\n * @param {Value | undefined} signal\n * @returns {V | undefined}\n */\nexport function safe_get(signal) {\n\treturn signal && get(signal);\n}\n\n/**\n * When used inside a [`$derived`](https://svelte.dev/docs/svelte/$derived) or [`$effect`](https://svelte.dev/docs/svelte/$effect),\n * any state read inside `fn` will not be treated as a dependency.\n *\n * ```ts\n * $effect(() => {\n * // this will run when `data` changes, but not when `time` changes\n * save(data, {\n * timestamp: untrack(() => time)\n * });\n * });\n * ```\n * @template T\n * @param {() => T} fn\n * @returns {T}\n */\nexport function untrack(fn) {\n\tvar previous_untracking = untracking;\n\ttry {\n\t\tuntracking = true;\n\t\treturn fn();\n\t} finally {\n\t\tuntracking = previous_untracking;\n\t}\n}\n\n/**\n * Possibly traverse an object and read all its properties so that they're all reactive in case this is `$state`.\n * Does only check first level of an object for performance reasons (heuristic should be good for 99% of all cases).\n * @param {any} value\n * @returns {void}\n */\nexport function deep_read_state(value) {\n\tif (typeof value !== 'object' || !value || value instanceof EventTarget) {\n\t\treturn;\n\t}\n\n\tif (STATE_SYMBOL in value) {\n\t\tdeep_read(value);\n\t} else if (!Array.isArray(value)) {\n\t\tfor (let key in value) {\n\t\t\tconst prop = value[key];\n\t\t\tif (typeof prop === 'object' && prop && STATE_SYMBOL in prop) {\n\t\t\t\tdeep_read(prop);\n\t\t\t}\n\t\t}\n\t}\n}\n\n/**\n * Deeply traverse an object and read all its properties\n * so that they're all reactive in case this is `$state`\n * @param {any} value\n * @param {Set} visited\n * @returns {void}\n */\nexport function deep_read(value, visited = new Set()) {\n\tif (\n\t\ttypeof value === 'object' &&\n\t\tvalue !== null &&\n\t\t// We don't want to traverse DOM elements\n\t\t!(value instanceof EventTarget) &&\n\t\t!visited.has(value)\n\t) {\n\t\tvisited.add(value);\n\t\t// When working with a possible SvelteDate, this\n\t\t// will ensure we capture changes to it.\n\t\tif (value instanceof Date) {\n\t\t\tvalue.getTime();\n\t\t}\n\t\tfor (let key in value) {\n\t\t\ttry {\n\t\t\t\tdeep_read(value[key], visited);\n\t\t\t} catch (e) {\n\t\t\t\t// continue\n\t\t\t}\n\t\t}\n\t\tconst proto = get_prototype_of(value);\n\t\tif (\n\t\t\tproto !== Object.prototype &&\n\t\t\tproto !== Array.prototype &&\n\t\t\tproto !== Map.prototype &&\n\t\t\tproto !== Set.prototype &&\n\t\t\tproto !== 
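/* [editor's note] Hedged sketch, not part of the original module: besides own enumerable keys, deep_read also calls prototype getters (just below), so getter-backed class state is touched too. Assuming Svelte 5 class fields:\n\n\t// class Box {\n\t//\t#value = $state(0);\n\t//\tget value() { return this.#value; }\n\t// }\n\t//\n\t// deep_read(new Box()) invokes the `value` getter via the prototype's\n\t// descriptors, registering the underlying signal as a dependency.\n*/ 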
Date.prototype\n\t\t) {\n\t\t\tconst descriptors = get_descriptors(proto);\n\t\t\tfor (let key in descriptors) {\n\t\t\t\tconst get = descriptors[key].get;\n\t\t\t\tif (get) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tget.call(value);\n\t\t\t\t\t} catch (e) {\n\t\t\t\t\t\t// continue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n","import { teardown } from '../../reactivity/effects.js';\nimport { define_property } from '../../../shared/utils.js';\nimport { hydrating } from '../hydration.js';\nimport { queue_micro_task } from '../task.js';\nimport { FILENAME } from '../../../../constants.js';\nimport * as w from '../../warnings.js';\nimport {\n\tactive_effect,\n\tactive_reaction,\n\tset_active_effect,\n\tset_active_reaction\n} from '../../runtime.js';\nimport { without_reactive_context } from './bindings/shared.js';\n\n/**\n * Used on elements, as a map of event type -> event handler,\n * and on events themselves to track which element handled an event\n */\nexport const event_symbol = Symbol('events');\n\n/** @type {Set} */\nexport const all_registered_events = new Set();\n\n/** @type {Set<(events: Array) => void>} */\nexport const root_event_handles = new Set();\n\n/**\n * SSR adds onload and onerror attributes to catch those events before the hydration.\n * This function detects those cases, removes the attributes and replays the events.\n * @param {HTMLElement} dom\n */\nexport function replay_events(dom) {\n\tif (!hydrating) return;\n\n\tdom.removeAttribute('onload');\n\tdom.removeAttribute('onerror');\n\t// @ts-expect-error\n\tconst event = dom.__e;\n\tif (event !== undefined) {\n\t\t// @ts-expect-error\n\t\tdom.__e = undefined;\n\t\tqueueMicrotask(() => {\n\t\t\tif (dom.isConnected) {\n\t\t\t\tdom.dispatchEvent(event);\n\t\t\t}\n\t\t});\n\t}\n}\n\n/**\n * @param {string} event_name\n * @param {EventTarget} dom\n * @param {EventListener} [handler]\n * @param {AddEventListenerOptions} [options]\n */\nexport function create_event(event_name, dom, handler, options = {}) {\n\t/**\n\t * @this {EventTarget}\n\t */\n\tfunction target_handler(/** @type {Event} */ event) {\n\t\tif (!options.capture) {\n\t\t\t// Only call in the bubble phase, else delegated events would be called before the capturing events\n\t\t\thandle_event_propagation.call(dom, event);\n\t\t}\n\t\tif (!event.cancelBubble) {\n\t\t\treturn without_reactive_context(() => {\n\t\t\t\treturn handler?.call(this, event);\n\t\t\t});\n\t\t}\n\t}\n\n\t// Chrome has a bug where pointer events don't work when attached to a DOM element that has been cloned\n\t// with cloneNode() and the DOM element is disconnected from the document. To ensure the event works, we\n\t// defer the attachment till after it's been appended to the document. TODO: remove this once Chrome fixes\n\t// this bug. The same applies to wheel events and touch events.\n\tif (\n\t\tevent_name.startsWith('pointer') ||\n\t\tevent_name.startsWith('touch') ||\n\t\tevent_name === 'wheel'\n\t) {\n\t\tqueue_micro_task(() => {\n\t\t\tdom.addEventListener(event_name, target_handler, options);\n\t\t});\n\t} else {\n\t\tdom.addEventListener(event_name, target_handler, options);\n\t}\n\n\treturn target_handler;\n}\n\n/**\n * Attaches an event handler to an element and returns a function that removes the handler. 
Using this\n * rather than `addEventListener` will preserve the correct order relative to handlers added declaratively\n * (with attributes like `onclick`), which use event delegation for performance reasons\n *\n * @param {EventTarget} element\n * @param {string} type\n * @param {EventListener} handler\n * @param {AddEventListenerOptions} [options]\n */\nexport function on(element, type, handler, options = {}) {\n\tvar target_handler = create_event(type, element, handler, options);\n\n\treturn () => {\n\t\telement.removeEventListener(type, target_handler, options);\n\t};\n}\n\n/**\n * @param {string} event_name\n * @param {Element} dom\n * @param {EventListener} [handler]\n * @param {boolean} [capture]\n * @param {boolean} [passive]\n * @returns {void}\n */\nexport function event(event_name, dom, handler, capture, passive) {\n\tvar options = { capture, passive };\n\tvar target_handler = create_event(event_name, dom, handler, options);\n\n\tif (\n\t\tdom === document.body ||\n\t\t// @ts-ignore\n\t\tdom === window ||\n\t\t// @ts-ignore\n\t\tdom === document ||\n\t\t// Firefox has quirky behavior, it can happen that we still get \"canplay\" events when the element is already removed\n\t\tdom instanceof HTMLMediaElement\n\t) {\n\t\tteardown(() => {\n\t\t\tdom.removeEventListener(event_name, target_handler, options);\n\t\t});\n\t}\n}\n\n/**\n * @param {string} event_name\n * @param {Element} element\n * @param {EventListener} [handler]\n * @returns {void}\n */\nexport function delegated(event_name, element, handler) {\n\t// @ts-expect-error\n\t(element[event_symbol] ??= {})[event_name] = handler;\n}\n\n/**\n * @param {Array} events\n * @returns {void}\n */\nexport function delegate(events) {\n\tfor (var i = 0; i < events.length; i++) {\n\t\tall_registered_events.add(events[i]);\n\t}\n\n\tfor (var fn of root_event_handles) {\n\t\tfn(events);\n\t}\n}\n\n// used to store the reference to the currently propagated event\n// to prevent garbage collection between microtasks in Firefox\n// If the event object is GCed too early, the expando __root property\n// set on the event object is lost, causing the event delegation\n// to process the event twice\nlet last_propagated_event = null;\n\n/**\n * @this {EventTarget}\n * @param {Event} event\n * @returns {void}\n */\nexport function handle_event_propagation(event) {\n\tvar handler_element = this;\n\tvar owner_document = /** @type {Node} */ (handler_element).ownerDocument;\n\tvar event_name = event.type;\n\tvar path = event.composedPath?.() || [];\n\tvar current_target = /** @type {null | Element} */ (path[0] || event.target);\n\n\tlast_propagated_event = event;\n\n\t// composedPath contains list of nodes the event has propagated through.\n\t// We check `event_symbol` to skip all nodes below it in case this is a\n\t// parent of the `event_symbol` node, which indicates that there's nested\n\t// mounted apps. 
In this case we don't want to trigger events multiple times.\n\tvar path_idx = 0;\n\n\t// the `last_propagated_event === event` check is redundant, but\n\t// without it the variable will be DCE'd and things will\n\t// fail mysteriously in Firefox\n\t// @ts-expect-error is added below\n\tvar handled_at = last_propagated_event === event && event[event_symbol];\n\n\tif (handled_at) {\n\t\tvar at_idx = path.indexOf(handled_at);\n\t\tif (\n\t\t\tat_idx !== -1 &&\n\t\t\t(handler_element === document || handler_element === /** @type {any} */ (window))\n\t\t) {\n\t\t\t// This is the fallback document listener or a window listener, but the event was already handled\n\t\t\t// -> ignore, but set handled_at to document/window so that we're resetting the event\n\t\t\t// chain in case someone manually dispatches the same event object again.\n\t\t\t// @ts-expect-error\n\t\t\tevent[event_symbol] = handler_element;\n\t\t\treturn;\n\t\t}\n\n\t\t// We're deliberately not skipping if the index is higher, because\n\t\t// someone could create an event programmatically and emit it multiple times,\n\t\t// in which case we want to handle the whole propagation chain properly each time.\n\t\t// (this will only be a false negative if the event is dispatched multiple times and\n\t\t// the fallback document listener isn't reached in between, but that's super rare)\n\t\tvar handler_idx = path.indexOf(handler_element);\n\t\tif (handler_idx === -1) {\n\t\t\t// handler_idx can theoretically be -1 (happened in some JSDOM testing scenarios with an event listener on the window object)\n\t\t\t// so guard against that, too, and assume that everything was handled at this point.\n\t\t\treturn;\n\t\t}\n\n\t\tif (at_idx <= handler_idx) {\n\t\t\tpath_idx = at_idx;\n\t\t}\n\t}\n\n\tcurrent_target = /** @type {Element} */ (path[path_idx] || event.target);\n\t// there can only be one delegated event per element, and we either already handled the current target,\n\t// or this is the very first target in the chain which has a non-delegated listener, in which case it's safe\n\t// to handle a possible delegated event on it later (through the root delegation listener for example).\n\tif (current_target === handler_element) return;\n\n\t// Proxy currentTarget to correct target\n\tdefine_property(event, 'currentTarget', {\n\t\tconfigurable: true,\n\t\tget() {\n\t\t\treturn current_target || owner_document;\n\t\t}\n\t});\n\n\t// This started because of Chromium issue https://chromestatus.com/feature/5128696823545856,\n\t// where removal or moving of the DOM can cause sync `blur` events to fire, which can cause logic\n\t// to run inside the current `active_reaction`, which isn't what we want at all. 
However, on reflection,\n\t// it's probably best that all events handled by Svelte have this behaviour, as we don't really want\n\t// an event handler to run in the context of another reaction or effect.\n\tvar previous_reaction = active_reaction;\n\tvar previous_effect = active_effect;\n\tset_active_reaction(null);\n\tset_active_effect(null);\n\n\ttry {\n\t\t/**\n\t\t * @type {unknown}\n\t\t */\n\t\tvar throw_error;\n\t\t/**\n\t\t * @type {unknown[]}\n\t\t */\n\t\tvar other_errors = [];\n\n\t\twhile (current_target !== null) {\n\t\t\t/** @type {null | Element} */\n\t\t\tvar parent_element =\n\t\t\t\tcurrent_target.assignedSlot ||\n\t\t\t\tcurrent_target.parentNode ||\n\t\t\t\t/** @type {any} */ (current_target).host ||\n\t\t\t\tnull;\n\n\t\t\ttry {\n\t\t\t\t// @ts-expect-error\n\t\t\t\tvar delegated = current_target[event_symbol]?.[event_name];\n\n\t\t\t\tif (\n\t\t\t\t\tdelegated != null &&\n\t\t\t\t\t(!(/** @type {any} */ (current_target).disabled) ||\n\t\t\t\t\t\t// DOM could've been updated already by the time this is reached, so we check this as well\n\t\t\t\t\t\t// -> the target could not have been disabled because it emits the event in the first place\n\t\t\t\t\t\tevent.target === current_target)\n\t\t\t\t) {\n\t\t\t\t\tdelegated.call(current_target, event);\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tif (throw_error) {\n\t\t\t\t\tother_errors.push(error);\n\t\t\t\t} else {\n\t\t\t\t\tthrow_error = error;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (event.cancelBubble || parent_element === handler_element || parent_element === null) {\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tcurrent_target = parent_element;\n\t\t}\n\n\t\tif (throw_error) {\n\t\t\tfor (let error of other_errors) {\n\t\t\t\t// Throw the rest of the errors, one-by-one on a microtask\n\t\t\t\tqueueMicrotask(() => {\n\t\t\t\t\tthrow error;\n\t\t\t\t});\n\t\t\t}\n\t\t\tthrow throw_error;\n\t\t}\n\t} finally {\n\t\t// @ts-expect-error is used above\n\t\tevent[event_symbol] = handler_element;\n\t\t// @ts-ignore remove proxy on currentTarget\n\t\tdelete event.currentTarget;\n\t\tset_active_reaction(previous_reaction);\n\t\tset_active_effect(previous_effect);\n\t}\n}\n\n/**\n * In dev, warn if an event handler is not a function, as it means the\n * user probably called the handler or forgot to add a `() =>`\n * @param {() => (event: Event, ...args: any) => void} thunk\n * @param {EventTarget} element\n * @param {[Event, ...any]} args\n * @param {any} component\n * @param {[number, number]} [loc]\n * @param {boolean} [has_side_effects]\n * @param {boolean} [remove_parens]\n */\nexport function apply(\n\tthunk,\n\telement,\n\targs,\n\tcomponent,\n\tloc,\n\thas_side_effects = false,\n\tremove_parens = false\n) {\n\tlet handler;\n\tlet error;\n\n\ttry {\n\t\thandler = thunk();\n\t} catch (e) {\n\t\terror = e;\n\t}\n\n\tif (typeof handler !== 'function' && (has_side_effects || handler != null || error)) {\n\t\tconst filename = component?.[FILENAME];\n\t\tconst location = loc ? ` at ${filename}:${loc[0]}:${loc[1]}` : ` in ${filename}`;\n\t\tconst phase = args[0]?.eventPhase < Event.BUBBLING_PHASE ? 'capture' : '';\n\t\tconst event_name = args[0]?.type + phase;\n\t\tconst description = `\\`${event_name}\\` handler${location}`;\n\t\tconst suggestion = remove_parens ? 
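/* [editor's note] Hedged example, not part of the original module: this is the dev-time guard behind the "event_handler_invalid" warning. Writing\n\n\t// <button onclick={handler()}>...</button>\n\n\tcalls `handler` during render and passes its return value (usually not a function) as the handler, so the suggestion chosen just after this comment is to remove the trailing `()` — the fix being onclick={handler} or onclick={() => handler()}.\n*/ 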
'remove the trailing `()`' : 'add a leading `() =>`';\n\n\t\tw.event_handler_invalid(description, suggestion);\n\n\t\tif (error) {\n\t\t\tthrow error;\n\t\t}\n\t}\n\thandler?.apply(element, args);\n}\n","import { create_element } from './operations.js';\n\nconst policy =\n\t// We gotta write it like this because after downleveling the pure comment may end up in the wrong location\n\tglobalThis?.window?.trustedTypes &&\n\t/* @__PURE__ */ globalThis.window.trustedTypes.createPolicy('svelte-trusted-html', {\n\t\t/** @param {string} html */\n\t\tcreateHTML: (html) => {\n\t\t\treturn html;\n\t\t}\n\t});\n\n/** @param {string} html */\nexport function create_trusted_html(html) {\n\treturn /** @type {string} */ (policy?.createHTML(html) ?? html);\n}\n\n/**\n * @param {string} html\n */\nexport function create_fragment_from_html(html) {\n\tvar elem = create_element('template');\n\telem.innerHTML = create_trusted_html(html.replaceAll('<!>', '<!---->')); // XHTML compliance\n\treturn elem.content;\n}\n","/** @import { Effect, EffectNodes, TemplateNode } from '#client' */\n/** @import { TemplateStructure } from './types' */\nimport { hydrate_next, hydrate_node, hydrating, set_hydrate_node } from './hydration.js';\nimport {\n\tcreate_text,\n\tget_first_child,\n\tget_next_sibling,\n\tis_firefox,\n\tcreate_element,\n\tcreate_fragment,\n\tcreate_comment,\n\tset_attribute,\n\tmerge_text_nodes\n} from './operations.js';\nimport { create_fragment_from_html } from './reconciler.js';\nimport { active_effect } from '../runtime.js';\nimport {\n\tNAMESPACE_MATHML,\n\tNAMESPACE_SVG,\n\tTEMPLATE_FRAGMENT,\n\tTEMPLATE_USE_IMPORT_NODE,\n\tTEMPLATE_USE_MATHML,\n\tTEMPLATE_USE_SVG\n} from '../../../constants.js';\nimport {\n\tCOMMENT_NODE,\n\tDOCUMENT_FRAGMENT_NODE,\n\tIS_XHTML,\n\tREACTION_RAN,\n\tTEXT_NODE\n} from '#client/constants';\n\nconst TEMPLATE_TAG = IS_XHTML ? 'template' : 'TEMPLATE';\nconst SCRIPT_TAG = IS_XHTML ? 'script' : 'SCRIPT';\n\n/**\n * @param {TemplateNode} start\n * @param {TemplateNode | null} end\n */\nexport function assign_nodes(start, end) {\n\tvar effect = /** @type {Effect} */ (active_effect);\n\tif (effect.nodes === null) {\n\t\teffect.nodes = { start, end, a: null, t: null };\n\t}\n}\n\n/**\n * @param {string} content\n * @param {number} flags\n * @returns {() => Node | Node[]}\n */\n/*#__NO_SIDE_EFFECTS__*/\nexport function from_html(content, flags) {\n\tvar is_fragment = (flags & TEMPLATE_FRAGMENT) !== 0;\n\tvar use_import_node = (flags & TEMPLATE_USE_IMPORT_NODE) !== 0;\n\n\t/** @type {Node} */\n\tvar node;\n\n\t/**\n\t * Whether or not the first item is a text/element node. If not, we need to\n\t * create an additional comment node to act as `effect.nodes.start`\n\t */\n\tvar has_start = !content.startsWith('<!>');\n\n\treturn () => {\n\t\tif (hydrating) {\n\t\t\tassign_nodes(hydrate_node, null);\n\t\t\treturn hydrate_node;\n\t\t}\n\n\t\tif (node === undefined) {\n\t\t\tnode = create_fragment_from_html(has_start ? content : '<!>' + content);\n\t\t\tif (!is_fragment) node = /** @type {TemplateNode} */ (get_first_child(node));\n\t\t}\n\n\t\tvar clone = /** @type {TemplateNode} */ (\n\t\t\tuse_import_node || is_firefox ? 
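/* [editor's note] Hedged sketch, not part of the original module: from_html parses its markup once, caches the result in `node`, and every subsequent component instance only pays for the clone made on the next line. Roughly (inside an active effect, since assign_nodes reads active_effect):\n\n\t// const template = from_html('<p>hello</p>', 0);\n\t// const a = template(); // first call: parse + clone\n\t// const b = template(); // later calls: clone only\n*/ 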
document.importNode(node, true) : node.cloneNode(true)\n\t\t);\n\n\t\tif (is_fragment) {\n\t\t\tvar start = /** @type {TemplateNode} */ (get_first_child(clone));\n\t\t\tvar end = /** @type {TemplateNode} */ (clone.lastChild);\n\n\t\t\tassign_nodes(start, end);\n\t\t} else {\n\t\t\tassign_nodes(clone, clone);\n\t\t}\n\n\t\treturn clone;\n\t};\n}\n\n/**\n * @param {string} content\n * @param {number} flags\n * @param {'svg' | 'math'} ns\n * @returns {() => Node | Node[]}\n */\n/*#__NO_SIDE_EFFECTS__*/\nfunction from_namespace(content, flags, ns = 'svg') {\n\t/**\n\t * Whether or not the first item is a text/element node. If not, we need to\n\t * create an additional comment node to act as `effect.nodes.start`\n\t */\n\tvar has_start = !content.startsWith('<!>');\n\n\tvar is_fragment = (flags & TEMPLATE_FRAGMENT) !== 0;\n\tvar wrapped = `<${ns}>${has_start ? content : '<!>' + content}</${ns}>`;\n\n\t/** @type {Element | DocumentFragment} */\n\tvar node;\n\n\treturn () => {\n\t\tif (hydrating) {\n\t\t\tassign_nodes(hydrate_node, null);\n\t\t\treturn hydrate_node;\n\t\t}\n\n\t\tif (!node) {\n\t\t\tvar fragment = /** @type {DocumentFragment} */ (create_fragment_from_html(wrapped));\n\t\t\tvar root = /** @type {Element} */ (get_first_child(fragment));\n\n\t\t\tif (is_fragment) {\n\t\t\t\tnode = document.createDocumentFragment();\n\t\t\t\twhile (get_first_child(root)) {\n\t\t\t\t\tnode.appendChild(/** @type {TemplateNode} */ (get_first_child(root)));\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tnode = /** @type {Element} */ (get_first_child(root));\n\t\t\t}\n\t\t}\n\n\t\tvar clone = /** @type {TemplateNode} */ (node.cloneNode(true));\n\n\t\tif (is_fragment) {\n\t\t\tvar start = /** @type {TemplateNode} */ (get_first_child(clone));\n\t\t\tvar end = /** @type {TemplateNode} */ (clone.lastChild);\n\n\t\t\tassign_nodes(start, end);\n\t\t} else {\n\t\t\tassign_nodes(clone, clone);\n\t\t}\n\n\t\treturn clone;\n\t};\n}\n\n/**\n * @param {string} content\n * @param {number} flags\n */\n/*#__NO_SIDE_EFFECTS__*/\nexport function from_svg(content, flags) {\n\treturn from_namespace(content, flags, 'svg');\n}\n\n/**\n * @param {string} content\n * @param {number} flags\n */\n/*#__NO_SIDE_EFFECTS__*/\nexport function from_mathml(content, flags) {\n\treturn from_namespace(content, flags, 'math');\n}\n\n/**\n * @param {TemplateStructure[]} structure\n * @param {typeof NAMESPACE_SVG | typeof NAMESPACE_MATHML | undefined} [ns]\n */\nfunction fragment_from_tree(structure, ns) {\n\tvar fragment = create_fragment();\n\n\tfor (var item of structure) {\n\t\tif (typeof item === 'string') {\n\t\t\tfragment.append(create_text(item));\n\t\t\tcontinue;\n\t\t}\n\n\t\t// if `preserveComments === true`, comments are represented as `['// ']`\n\t\tif (item === undefined || item[0][0] === '/') {\n\t\t\tfragment.append(create_comment(item ? item[0].slice(3) : ''));\n\t\t\tcontinue;\n\t\t}\n\n\t\tconst [name, attributes, ...children] = item;\n\n\t\tconst namespace = name === 'svg' ? NAMESPACE_SVG : name === 'math' ? NAMESPACE_MATHML : ns;\n\n\t\tvar element = create_element(name, namespace, attributes?.is);\n\n\t\tfor (var key in attributes) {\n\t\t\tset_attribute(element, key, attributes[key]);\n\t\t}\n\n\t\tif (children.length > 0) {\n\t\t\tvar target =\n\t\t\t\telement.nodeName === TEMPLATE_TAG\n\t\t\t\t\t? /** @type {HTMLTemplateElement} */ (element).content\n\t\t\t\t\t: element;\n\n\t\t\ttarget.append(\n\t\t\t\tfragment_from_tree(children, element.nodeName === 'foreignObject' ? 
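/* [editor's note] Hedged note, not part of the original module: SVG and MathML markup cannot be parsed via innerHTML in the HTML namespace, which is why from_namespace (above) wraps content first — e.g. from_svg('<circle r="5"/>') parses '<svg><circle r="5"/></svg>' and then unwraps the root, so only the <circle> is cached and cloned. The ternary this comment sits in restores the HTML namespace for <foreignObject> children, matching browser parsing rules. */ 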
undefined : namespace)\n\t\t\t);\n\t\t}\n\n\t\tfragment.append(element);\n\t}\n\n\treturn fragment;\n}\n\n/**\n * @param {TemplateStructure[]} structure\n * @param {number} flags\n * @returns {() => Node | Node[]}\n */\n/*#__NO_SIDE_EFFECTS__*/\nexport function from_tree(structure, flags) {\n\tvar is_fragment = (flags & TEMPLATE_FRAGMENT) !== 0;\n\tvar use_import_node = (flags & TEMPLATE_USE_IMPORT_NODE) !== 0;\n\n\t/** @type {Node} */\n\tvar node;\n\n\treturn () => {\n\t\tif (hydrating) {\n\t\t\tassign_nodes(hydrate_node, null);\n\t\t\treturn hydrate_node;\n\t\t}\n\n\t\tif (node === undefined) {\n\t\t\tconst ns =\n\t\t\t\t(flags & TEMPLATE_USE_SVG) !== 0\n\t\t\t\t\t? NAMESPACE_SVG\n\t\t\t\t\t: (flags & TEMPLATE_USE_MATHML) !== 0\n\t\t\t\t\t\t? NAMESPACE_MATHML\n\t\t\t\t\t\t: undefined;\n\n\t\t\tnode = fragment_from_tree(structure, ns);\n\t\t\tif (!is_fragment) node = /** @type {TemplateNode} */ (get_first_child(node));\n\t\t}\n\n\t\tvar clone = /** @type {TemplateNode} */ (\n\t\t\tuse_import_node || is_firefox ? document.importNode(node, true) : node.cloneNode(true)\n\t\t);\n\n\t\tif (is_fragment) {\n\t\t\tvar start = /** @type {TemplateNode} */ (get_first_child(clone));\n\t\t\tvar end = /** @type {TemplateNode} */ (clone.lastChild);\n\n\t\t\tassign_nodes(start, end);\n\t\t} else {\n\t\t\tassign_nodes(clone, clone);\n\t\t}\n\n\t\treturn clone;\n\t};\n}\n\n/**\n * @param {() => Element | DocumentFragment} fn\n */\nexport function with_script(fn) {\n\treturn () => run_scripts(fn());\n}\n\n/**\n * Creating a document fragment from HTML that contains script tags will not execute\n * the scripts. We need to replace the script tags with new ones so that they are executed.\n * @param {Element | DocumentFragment} node\n * @returns {Node | Node[]}\n */\nfunction run_scripts(node) {\n\t// scripts were SSR'd, in which case they will run\n\tif (hydrating) return node;\n\n\tconst is_fragment = node.nodeType === DOCUMENT_FRAGMENT_NODE;\n\tconst scripts =\n\t\t/** @type {HTMLElement} */ (node).nodeName === SCRIPT_TAG\n\t\t\t? [/** @type {HTMLScriptElement} */ (node)]\n\t\t\t: node.querySelectorAll('script');\n\n\tconst effect = /** @type {Effect & { nodes: EffectNodes }} */ (active_effect);\n\n\tfor (const script of scripts) {\n\t\tconst clone = create_element('script');\n\t\tfor (var attribute of script.attributes) {\n\t\t\tclone.setAttribute(attribute.name, attribute.value);\n\t\t}\n\n\t\tclone.textContent = script.textContent;\n\n\t\t// The script has changed - if it's at the edges, the effect now points at dead nodes\n\t\tif (is_fragment ? node.firstChild === script : node === script) {\n\t\t\teffect.nodes.start = clone;\n\t\t}\n\t\tif (is_fragment ? 
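/* [editor's note] Hedged sketch, not part of the original module: scripts created through innerHTML are inert by spec, so run_scripts swaps each one for a freshly created script element, which the browser does execute on insertion. The standalone equivalent of the loop above:\n\n\t// const live = document.createElement('script');\n\t// for (const { name, value } of inert.attributes) live.setAttribute(name, value);\n\t// live.textContent = inert.textContent;\n\t// inert.replaceWith(live); // executes now (assuming `inert` is an innerHTML-created script)\n*/ 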
node.lastChild === script : node === script) {\n\t\t\teffect.nodes.end = clone;\n\t\t}\n\n\t\tscript.replaceWith(clone);\n\t}\n\treturn node;\n}\n\n/**\n * Don't mark this as side-effect-free; hydration needs to walk all nodes\n * @param {any} value\n */\nexport function text(value = '') {\n\tif (!hydrating) {\n\t\tvar t = create_text(value + '');\n\t\tassign_nodes(t, t);\n\t\treturn t;\n\t}\n\n\tvar node = hydrate_node;\n\n\tif (node.nodeType !== TEXT_NODE) {\n\t\t// if an {expression} is empty during SSR, we need to insert an empty text node\n\t\tnode.before((node = create_text()));\n\t\tset_hydrate_node(node);\n\t} else {\n\t\tmerge_text_nodes(/** @type {Text} */ (node));\n\t}\n\n\tassign_nodes(node, node);\n\treturn node;\n}\n\n/**\n * @returns {TemplateNode | DocumentFragment}\n */\nexport function comment() {\n\t// we're not delegating to `template` here for performance reasons\n\tif (hydrating) {\n\t\tassign_nodes(hydrate_node, null);\n\t\treturn hydrate_node;\n\t}\n\n\tvar frag = document.createDocumentFragment();\n\tvar start = document.createComment('');\n\tvar anchor = create_text();\n\tfrag.append(start, anchor);\n\n\tassign_nodes(start, anchor);\n\n\treturn frag;\n}\n\n/**\n * Assign the created (or in hydration mode, traversed) dom elements to the current block\n * and insert the elements into the dom (in client mode).\n * @param {Text | Comment | Element} anchor\n * @param {DocumentFragment | Element} dom\n */\nexport function append(anchor, dom) {\n\tif (hydrating) {\n\t\tvar effect = /** @type {Effect & { nodes: EffectNodes }} */ (active_effect);\n\n\t\t// When hydrating, if an outer component and an inner component are async, i.e. blocked on a promise,\n\t\t// then by the time the inner one resolves we have already advanced to the end of the hydrated nodes\n\t\t// of the parent component. 
Check whether the end node was already assigned for that reason, to avoid rewinding the parent's end marker.\n\t\tif ((effect.f & REACTION_RAN) === 0 || effect.nodes.end === null) {\n\t\t\teffect.nodes.end = hydrate_node;\n\t\t}\n\n\t\thydrate_next();\n\t\treturn;\n\t}\n\n\tif (anchor === null) {\n\t\t// edge case — void `<svelte:element>` with content\n\t\treturn;\n\t}\n\n\tanchor.before(/** @type {Node} */ (dom));\n}\n\n/**\n * Create (or hydrate) a unique UID for the component instance.\n */\nexport function props_id() {\n\tif (\n\t\thydrating &&\n\t\thydrate_node &&\n\t\thydrate_node.nodeType === COMMENT_NODE &&\n\t\thydrate_node.textContent?.startsWith(`$`)\n\t) {\n\t\tconst id = hydrate_node.textContent.substring(1);\n\t\thydrate_next();\n\t\treturn id;\n\t}\n\n\t// @ts-expect-error This way we ensure the id is unique even across Svelte runtimes\n\t(window.__svelte ??= {}).uid ??= 1;\n\n\t// @ts-expect-error\n\treturn `c${window.__svelte.uid++}`;\n}\n","const regex_return_characters = /\\r/g;\n\n/**\n * @param {string} str\n * @returns {string}\n */\nexport function hash(str) {\n\tstr = str.replace(regex_return_characters, '');\n\tlet hash = 5381;\n\tlet i = str.length;\n\n\twhile (i--) hash = ((hash << 5) - hash) ^ str.charCodeAt(i);\n\treturn (hash >>> 0).toString(36);\n}\n\nconst VOID_ELEMENT_NAMES = [\n\t'area',\n\t'base',\n\t'br',\n\t'col',\n\t'command',\n\t'embed',\n\t'hr',\n\t'img',\n\t'input',\n\t'keygen',\n\t'link',\n\t'meta',\n\t'param',\n\t'source',\n\t'track',\n\t'wbr'\n];\n\n/**\n * Returns `true` if `name` is the name of a void element\n * @param {string} name\n */\nexport function is_void(name) {\n\treturn VOID_ELEMENT_NAMES.includes(name) || name.toLowerCase() === '!doctype';\n}\n\nconst RESERVED_WORDS = [\n\t'arguments',\n\t'await',\n\t'break',\n\t'case',\n\t'catch',\n\t'class',\n\t'const',\n\t'continue',\n\t'debugger',\n\t'default',\n\t'delete',\n\t'do',\n\t'else',\n\t'enum',\n\t'eval',\n\t'export',\n\t'extends',\n\t'false',\n\t'finally',\n\t'for',\n\t'function',\n\t'if',\n\t'implements',\n\t'import',\n\t'in',\n\t'instanceof',\n\t'interface',\n\t'let',\n\t'new',\n\t'null',\n\t'package',\n\t'private',\n\t'protected',\n\t'public',\n\t'return',\n\t'static',\n\t'super',\n\t'switch',\n\t'this',\n\t'throw',\n\t'true',\n\t'try',\n\t'typeof',\n\t'var',\n\t'void',\n\t'while',\n\t'with',\n\t'yield'\n];\n\n/**\n * Returns `true` if `word` is a reserved JavaScript keyword\n * @param {string} word\n */\nexport function is_reserved(word) {\n\treturn RESERVED_WORDS.includes(word);\n}\n\n/**\n * @param {string} name\n */\nexport function is_capture_event(name) {\n\treturn name.endsWith('capture') && name !== 'gotpointercapture' && name !== 'lostpointercapture';\n}\n\n/** List of Element events that will be delegated */\nconst DELEGATED_EVENTS = [\n\t'beforeinput',\n\t'click',\n\t'change',\n\t'dblclick',\n\t'contextmenu',\n\t'focusin',\n\t'focusout',\n\t'input',\n\t'keydown',\n\t'keyup',\n\t'mousedown',\n\t'mousemove',\n\t'mouseout',\n\t'mouseover',\n\t'mouseup',\n\t'pointerdown',\n\t'pointermove',\n\t'pointerout',\n\t'pointerover',\n\t'pointerup',\n\t'touchend',\n\t'touchmove',\n\t'touchstart'\n];\n\n/**\n * Returns `true` if `event_name` is a delegated event\n * @param {string} event_name\n */\nexport function can_delegate_event(event_name) {\n\treturn DELEGATED_EVENTS.includes(event_name);\n}\n\n/**\n * Attributes that are boolean, i.e. 
they are present or not present.\n */\nconst DOM_BOOLEAN_ATTRIBUTES = [\n\t'allowfullscreen',\n\t'async',\n\t'autofocus',\n\t'autoplay',\n\t'checked',\n\t'controls',\n\t'default',\n\t'disabled',\n\t'formnovalidate',\n\t'indeterminate',\n\t'inert',\n\t'ismap',\n\t'loop',\n\t'multiple',\n\t'muted',\n\t'nomodule',\n\t'novalidate',\n\t'open',\n\t'playsinline',\n\t'readonly',\n\t'required',\n\t'reversed',\n\t'seamless',\n\t'selected',\n\t'webkitdirectory',\n\t'defer',\n\t'disablepictureinpicture',\n\t'disableremoteplayback'\n];\n\n/**\n * Returns `true` if `name` is a boolean attribute\n * @param {string} name\n */\nexport function is_boolean_attribute(name) {\n\treturn DOM_BOOLEAN_ATTRIBUTES.includes(name);\n}\n\n/**\n * @type {Record}\n * List of attribute names that should be aliased to their property names\n * because they behave differently between setting them as an attribute and\n * setting them as a property.\n */\nconst ATTRIBUTE_ALIASES = {\n\t// no `class: 'className'` because we handle that separately\n\tformnovalidate: 'formNoValidate',\n\tismap: 'isMap',\n\tnomodule: 'noModule',\n\tplaysinline: 'playsInline',\n\treadonly: 'readOnly',\n\tdefaultvalue: 'defaultValue',\n\tdefaultchecked: 'defaultChecked',\n\tsrcobject: 'srcObject',\n\tnovalidate: 'noValidate',\n\tallowfullscreen: 'allowFullscreen',\n\tdisablepictureinpicture: 'disablePictureInPicture',\n\tdisableremoteplayback: 'disableRemotePlayback'\n};\n\n/**\n * @param {string} name\n */\nexport function normalize_attribute(name) {\n\tname = name.toLowerCase();\n\treturn ATTRIBUTE_ALIASES[name] ?? name;\n}\n\nconst DOM_PROPERTIES = [\n\t...DOM_BOOLEAN_ATTRIBUTES,\n\t'formNoValidate',\n\t'isMap',\n\t'noModule',\n\t'playsInline',\n\t'readOnly',\n\t'value',\n\t'volume',\n\t'defaultValue',\n\t'defaultChecked',\n\t'srcObject',\n\t'noValidate',\n\t'allowFullscreen',\n\t'disablePictureInPicture',\n\t'disableRemotePlayback'\n];\n\n/**\n * @param {string} name\n */\nexport function is_dom_property(name) {\n\treturn DOM_PROPERTIES.includes(name);\n}\n\nconst NON_STATIC_PROPERTIES = ['autofocus', 'muted', 'defaultValue', 'defaultChecked'];\n\n/**\n * Returns `true` if the given attribute cannot be set through the template\n * string, i.e. 
needs some kind of JavaScript handling to work.\n * @param {string} name\n */\nexport function cannot_be_set_statically(name) {\n\treturn NON_STATIC_PROPERTIES.includes(name);\n}\n\n/**\n * Subset of delegated events which should be passive by default.\n * These two are already passive via browser defaults on window, document and body.\n * But since\n * - we're delegating them\n * - they happen often\n * - they apply to mobile which is generally less performant\n * we're marking them as passive by default for other elements, too.\n */\nconst PASSIVE_EVENTS = ['touchstart', 'touchmove'];\n\n/**\n * Returns `true` if `name` is a passive event\n * @param {string} name\n */\nexport function is_passive_event(name) {\n\treturn PASSIVE_EVENTS.includes(name);\n}\n\nconst CONTENT_EDITABLE_BINDINGS = ['textContent', 'innerHTML', 'innerText'];\n\n/** @param {string} name */\nexport function is_content_editable_binding(name) {\n\treturn CONTENT_EDITABLE_BINDINGS.includes(name);\n}\n\nconst LOAD_ERROR_ELEMENTS = [\n\t'body',\n\t'embed',\n\t'iframe',\n\t'img',\n\t'link',\n\t'object',\n\t'script',\n\t'style',\n\t'track'\n];\n\n/**\n * Returns `true` if the element emits `load` and `error` events\n * @param {string} name\n */\nexport function is_load_error_element(name) {\n\treturn LOAD_ERROR_ELEMENTS.includes(name);\n}\n\nconst SVG_ELEMENTS = [\n\t'altGlyph',\n\t'altGlyphDef',\n\t'altGlyphItem',\n\t'animate',\n\t'animateColor',\n\t'animateMotion',\n\t'animateTransform',\n\t'circle',\n\t'clipPath',\n\t'color-profile',\n\t'cursor',\n\t'defs',\n\t'desc',\n\t'discard',\n\t'ellipse',\n\t'feBlend',\n\t'feColorMatrix',\n\t'feComponentTransfer',\n\t'feComposite',\n\t'feConvolveMatrix',\n\t'feDiffuseLighting',\n\t'feDisplacementMap',\n\t'feDistantLight',\n\t'feDropShadow',\n\t'feFlood',\n\t'feFuncA',\n\t'feFuncB',\n\t'feFuncG',\n\t'feFuncR',\n\t'feGaussianBlur',\n\t'feImage',\n\t'feMerge',\n\t'feMergeNode',\n\t'feMorphology',\n\t'feOffset',\n\t'fePointLight',\n\t'feSpecularLighting',\n\t'feSpotLight',\n\t'feTile',\n\t'feTurbulence',\n\t'filter',\n\t'font',\n\t'font-face',\n\t'font-face-format',\n\t'font-face-name',\n\t'font-face-src',\n\t'font-face-uri',\n\t'foreignObject',\n\t'g',\n\t'glyph',\n\t'glyphRef',\n\t'hatch',\n\t'hatchpath',\n\t'hkern',\n\t'image',\n\t'line',\n\t'linearGradient',\n\t'marker',\n\t'mask',\n\t'mesh',\n\t'meshgradient',\n\t'meshpatch',\n\t'meshrow',\n\t'metadata',\n\t'missing-glyph',\n\t'mpath',\n\t'path',\n\t'pattern',\n\t'polygon',\n\t'polyline',\n\t'radialGradient',\n\t'rect',\n\t'set',\n\t'solidcolor',\n\t'stop',\n\t'svg',\n\t'switch',\n\t'symbol',\n\t'text',\n\t'textPath',\n\t'tref',\n\t'tspan',\n\t'unknown',\n\t'use',\n\t'view',\n\t'vkern'\n];\n\n/** @param {string} name */\nexport function is_svg(name) {\n\treturn SVG_ELEMENTS.includes(name);\n}\n\nconst MATHML_ELEMENTS = [\n\t'annotation',\n\t'annotation-xml',\n\t'maction',\n\t'math',\n\t'merror',\n\t'mfrac',\n\t'mi',\n\t'mmultiscripts',\n\t'mn',\n\t'mo',\n\t'mover',\n\t'mpadded',\n\t'mphantom',\n\t'mprescripts',\n\t'mroot',\n\t'mrow',\n\t'ms',\n\t'mspace',\n\t'msqrt',\n\t'mstyle',\n\t'msub',\n\t'msubsup',\n\t'msup',\n\t'mtable',\n\t'mtd',\n\t'mtext',\n\t'mtr',\n\t'munder',\n\t'munderover',\n\t'semantics'\n];\n\n/** @param {string} name */\nexport function is_mathml(name) {\n\treturn MATHML_ELEMENTS.includes(name);\n}\n\nconst STATE_CREATION_RUNES = /** @type {const} */ ([\n\t'$state',\n\t'$state.raw',\n\t'$derived',\n\t'$derived.by'\n]);\n\nconst RUNES = /** @type {const} */ 
([\n\t...STATE_CREATION_RUNES,\n\t'$state.eager',\n\t'$state.snapshot',\n\t'$props',\n\t'$props.id',\n\t'$bindable',\n\t'$effect',\n\t'$effect.pre',\n\t'$effect.tracking',\n\t'$effect.root',\n\t'$effect.pending',\n\t'$inspect',\n\t'$inspect().with',\n\t'$inspect.trace',\n\t'$host'\n]);\n\n/** @typedef {typeof RUNES[number]} RuneName */\n\n/**\n * @param {string} name\n * @returns {name is RuneName}\n */\nexport function is_rune(name) {\n\treturn RUNES.includes(/** @type {RuneName} */ (name));\n}\n\n/** @typedef {typeof STATE_CREATION_RUNES[number]} StateCreationRuneName */\n\n/**\n * @param {string} name\n * @returns {name is StateCreationRuneName}\n */\nexport function is_state_creation_rune(name) {\n\treturn STATE_CREATION_RUNES.includes(/** @type {StateCreationRuneName} */ (name));\n}\n\n/** List of elements that require raw contents and should not have SSR comments put in them */\nconst RAW_TEXT_ELEMENTS = /** @type {const} */ (['textarea', 'script', 'style', 'title']);\n\n/** @param {string} name */\nexport function is_raw_text_element(name) {\n\treturn RAW_TEXT_ELEMENTS.includes(/** @type {typeof RAW_TEXT_ELEMENTS[number]} */ (name));\n}\n\n// Matches valid HTML/SVG/MathML element names and custom element names.\n// https://html.spec.whatwg.org/multipage/custom-elements.html#valid-custom-element-name\n//\n// Standard elements: ASCII alpha start, followed by ASCII alphanumerics.\n// Custom elements: ASCII alpha start, followed by any mix of PCENChar (which\n// includes ASCII alphanumerics, `-`, `.`, `_`, and specified Unicode ranges),\n// with at least one hyphen required somewhere after the first character.\n//\n// Rejects strings containing whitespace, quotes, angle brackets, slashes, equals,\n// or other characters that could break out of a tag-name token and enable markup injection.\nexport const REGEX_VALID_TAG_NAME =\n\t/^[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9.\\-_\\u00B7\\u00C0-\\u00D6\\u00D8-\\u00F6\\u00F8-\\u037D\\u037F-\\u1FFF\\u200C-\\u200D\\u203F-\\u2040\\u2070-\\u218F\\u2C00-\\u2FEF\\u3001-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFFD\\u{10000}-\\u{EFFFF}]+)*$/u;\n\n/**\n * Prevent devtools trying to make `location` a clickable link by inserting a zero-width space\n * @template {string | undefined} T\n * @param {T} location\n * @returns {T};\n */\nexport function sanitize_location(location) {\n\treturn /** @type {T} */ (location?.replace(/\\//g, '/\\u200b'));\n}\n","/** @import { ComponentContext, Effect, EffectNodes, TemplateNode } from '#client' */\n/** @import { Component, ComponentType, SvelteComponent, MountOptions } from '../../index.js' */\nimport { DEV } from 'esm-env';\nimport {\n\tclear_text_content,\n\tcreate_text,\n\tget_first_child,\n\tget_next_sibling,\n\tinit_operations\n} from './dom/operations.js';\nimport { HYDRATION_END, HYDRATION_ERROR, HYDRATION_START } from '../../constants.js';\nimport { active_effect } from './runtime.js';\nimport { push, pop, component_context } from './context.js';\nimport { component_root } from './reactivity/effects.js';\nimport { hydrate_node, hydrating, set_hydrate_node, set_hydrating } from './dom/hydration.js';\nimport { array_from } from '../shared/utils.js';\nimport {\n\tall_registered_events,\n\thandle_event_propagation,\n\troot_event_handles\n} from './dom/elements/events.js';\nimport * as w from './warnings.js';\nimport * as e from './errors.js';\nimport { assign_nodes } from './dom/template.js';\nimport { is_passive_event } from '../../utils.js';\nimport { COMMENT_NODE, STATE_SYMBOL } from './constants.js';\nimport { boundary } 
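// --- Editor's sketch (not part of this module) -------------------------------
// REGEX_VALID_TAG_NAME, defined above, accepts standard and custom element
// names and rejects anything that could escape a tag-name token:
REGEX_VALID_TAG_NAME.test('div'); //        true:  standard element
REGEX_VALID_TAG_NAME.test('my-widget'); //  true:  custom element (has a hyphen)
REGEX_VALID_TAG_NAME.test('1-bad'); //      false: must start with an ASCII letter
REGEX_VALID_TAG_NAME.test('a b'); //        false: whitespace rejected
REGEX_VALID_TAG_NAME.test('img/src'); //    false: slash rejected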
from './dom/blocks/boundary.js';\n\n/**\n * This is normally true — block effects should run their intro transitions —\n * but is false during hydration (unless `options.intro` is `true`) and\n * when creating the children of a `` that just changed tag\n */\nexport let should_intro = true;\n\n/** @param {boolean} value */\nexport function set_should_intro(value) {\n\tshould_intro = value;\n}\n\n/**\n * @param {Element} text\n * @param {string} value\n * @returns {void}\n */\nexport function set_text(text, value) {\n\t// For objects, we apply string coercion (which might make things like $state array references in the template reactive) before diffing\n\tvar str = value == null ? '' : typeof value === 'object' ? `${value}` : value;\n\t// @ts-expect-error\n\tif (str !== (text.__t ??= text.nodeValue)) {\n\t\t// @ts-expect-error\n\t\ttext.__t = str;\n\t\ttext.nodeValue = `${str}`;\n\t}\n}\n\n/**\n * Mounts a component to the given target and returns the exports and potentially the props (if compiled with `accessors: true`) of the component.\n * Transitions will play during the initial render unless the `intro` option is set to `false`.\n *\n * @template {Record} Props\n * @template {Record} Exports\n * @param {ComponentType> | Component} component\n * @param {MountOptions} options\n * @returns {Exports}\n */\nexport function mount(component, options) {\n\treturn _mount(component, options);\n}\n\n/**\n * Hydrates a component on the given target and returns the exports and potentially the props (if compiled with `accessors: true`) of the component\n *\n * @template {Record} Props\n * @template {Record} Exports\n * @param {ComponentType> | Component} component\n * @param {{} extends Props ? {\n * \t\ttarget: Document | Element | ShadowRoot;\n * \t\tprops?: Props;\n * \t\tevents?: Record any>;\n * \tcontext?: Map;\n * \t\tintro?: boolean;\n * \t\trecover?: boolean;\n *\t\ttransformError?: (error: unknown) => unknown;\n * \t} : {\n * \t\ttarget: Document | Element | ShadowRoot;\n * \t\tprops: Props;\n * \t\tevents?: Record any>;\n * \tcontext?: Map;\n * \t\tintro?: boolean;\n * \t\trecover?: boolean;\n *\t\ttransformError?: (error: unknown) => unknown;\n * \t}} options\n * @returns {Exports}\n */\nexport function hydrate(component, options) {\n\tinit_operations();\n\toptions.intro = options.intro ?? 
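// --- Editor's sketch (not part of this module) -------------------------------
// A typical mount() call per the JSDoc above; App.svelte is an assumed component.
import { mount } from 'svelte';
import App from './App.svelte';

const app = mount(App, {
	target: /** @type {Element} */ (document.getElementById('app')),
	props: { greeting: 'hello' }
});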
false;\n\tconst target = options.target;\n\tconst was_hydrating = hydrating;\n\tconst previous_hydrate_node = hydrate_node;\n\n\ttry {\n\t\tvar anchor = get_first_child(target);\n\n\t\twhile (\n\t\t\tanchor &&\n\t\t\t(anchor.nodeType !== COMMENT_NODE || /** @type {Comment} */ (anchor).data !== HYDRATION_START)\n\t\t) {\n\t\t\tanchor = get_next_sibling(anchor);\n\t\t}\n\n\t\tif (!anchor) {\n\t\t\tthrow HYDRATION_ERROR;\n\t\t}\n\n\t\tset_hydrating(true);\n\t\tset_hydrate_node(/** @type {Comment} */ (anchor));\n\n\t\tconst instance = _mount(component, { ...options, anchor });\n\n\t\tset_hydrating(false);\n\n\t\treturn /** @type {Exports} */ (instance);\n\t} catch (error) {\n\t\t// re-throw Svelte errors - they are certainly not related to hydration\n\t\tif (\n\t\t\terror instanceof Error &&\n\t\t\terror.message.split('\\n').some((line) => line.startsWith('https://svelte.dev/e/'))\n\t\t) {\n\t\t\tthrow error;\n\t\t}\n\t\tif (error !== HYDRATION_ERROR) {\n\t\t\t// eslint-disable-next-line no-console\n\t\t\tconsole.warn('Failed to hydrate: ', error);\n\t\t}\n\n\t\tif (options.recover === false) {\n\t\t\te.hydration_failed();\n\t\t}\n\n\t\t// If an error occurred above, the operations might not yet have been initialised.\n\t\tinit_operations();\n\t\tclear_text_content(target);\n\n\t\tset_hydrating(false);\n\t\treturn mount(component, options);\n\t} finally {\n\t\tset_hydrating(was_hydrating);\n\t\tset_hydrate_node(previous_hydrate_node);\n\t}\n}\n\n/** @type {Map>} */\nconst listeners = new Map();\n\n/**\n * @template {Record} Exports\n * @param {ComponentType> | Component} Component\n * @param {MountOptions} options\n * @returns {Exports}\n */\nfunction _mount(\n\tComponent,\n\t{ target, anchor, props = {}, events, context, intro = true, transformError }\n) {\n\tinit_operations();\n\n\t/** @type {Exports} */\n\t// @ts-expect-error will be defined because the render effect runs synchronously\n\tvar component = undefined;\n\n\tvar unmount = component_root(() => {\n\t\tvar anchor_node = anchor ?? 
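// --- Editor's sketch (not part of this module) -------------------------------
// hydrate() per the implementation above: the server-rendered HTML (including
// the hydration start/end comments) must already be inside `target`. On a
// hydration error it falls back to a fresh mount() unless `recover: false`
// was passed, in which case e.hydration_failed() throws instead.
import { hydrate } from 'svelte';
import App from './App.svelte';

hydrate(App, { target: document.body, props: { greeting: 'hello' } });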
target.appendChild(create_text());\n\n\t\tboundary(\n\t\t\t/** @type {TemplateNode} */ (anchor_node),\n\t\t\t{\n\t\t\t\tpending: () => {}\n\t\t\t},\n\t\t\t(anchor_node) => {\n\t\t\t\tpush({});\n\t\t\t\tvar ctx = /** @type {ComponentContext} */ (component_context);\n\t\t\t\tif (context) ctx.c = context;\n\n\t\t\t\tif (events) {\n\t\t\t\t\t// We can't spread the object or else we'd lose the state proxy stuff, if it is one\n\t\t\t\t\t/** @type {any} */ (props).$$events = events;\n\t\t\t\t}\n\n\t\t\t\tif (hydrating) {\n\t\t\t\t\tassign_nodes(/** @type {TemplateNode} */ (anchor_node), null);\n\t\t\t\t}\n\n\t\t\t\tshould_intro = intro;\n\t\t\t\t// @ts-expect-error the public typings are not what the actual function looks like\n\t\t\t\tcomponent = Component(anchor_node, props) || {};\n\t\t\t\tshould_intro = true;\n\n\t\t\t\tif (hydrating) {\n\t\t\t\t\t/** @type {Effect & { nodes: EffectNodes }} */ (active_effect).nodes.end = hydrate_node;\n\n\t\t\t\t\tif (\n\t\t\t\t\t\thydrate_node === null ||\n\t\t\t\t\t\thydrate_node.nodeType !== COMMENT_NODE ||\n\t\t\t\t\t\t/** @type {Comment} */ (hydrate_node).data !== HYDRATION_END\n\t\t\t\t\t) {\n\t\t\t\t\t\tw.hydration_mismatch();\n\t\t\t\t\t\tthrow HYDRATION_ERROR;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tpop();\n\t\t\t},\n\t\t\ttransformError\n\t\t);\n\n\t\t// Setup event delegation _after_ component is mounted - if an error would happen during mount, it would otherwise not be cleaned up\n\t\t/** @type {Set} */\n\t\tvar registered_events = new Set();\n\n\t\t/** @param {Array} events */\n\t\tvar event_handle = (events) => {\n\t\t\tfor (var i = 0; i < events.length; i++) {\n\t\t\t\tvar event_name = events[i];\n\n\t\t\t\tif (registered_events.has(event_name)) continue;\n\t\t\t\tregistered_events.add(event_name);\n\n\t\t\t\tvar passive = is_passive_event(event_name);\n\n\t\t\t\t// Add the event listener to both the container and the document.\n\t\t\t\t// The container listener ensures we catch events from within in case\n\t\t\t\t// the outer content stops propagation of the event.\n\t\t\t\t//\n\t\t\t\t// The document listener ensures we catch events that originate from elements that were\n\t\t\t\t// manually moved outside of the container (e.g. 
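// --- Editor's sketch (not part of this module) -------------------------------
// The reference-counting scheme above in miniature: one real listener per
// (node, event) pair, shared by however many roots registered it. Names here
// are illustrative, not the module's internals.
const shared_counts = new Map(); // Map<EventTarget, Map<string, number>>
function share_listener(node, name, handler) {
	let per_node = shared_counts.get(node);
	if (per_node === undefined) shared_counts.set(node, (per_node = new Map()));
	const n = per_node.get(name) ?? 0;
	if (n === 0) node.addEventListener(name, handler); // first registration only
	per_node.set(name, n + 1);
}
function release_listener(node, name, handler) {
	const per_node = shared_counts.get(node);
	const n = (per_node?.get(name) ?? 1) - 1;
	if (n === 0) {
		node.removeEventListener(name, handler); // last release tears down
		per_node?.delete(name);
	} else {
		per_node?.set(name, n);
	}
}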
via manual portals).\n\t\t\t\tfor (const node of [target, document]) {\n\t\t\t\t\tvar counts = listeners.get(node);\n\n\t\t\t\t\tif (counts === undefined) {\n\t\t\t\t\t\tcounts = new Map();\n\t\t\t\t\t\tlisteners.set(node, counts);\n\t\t\t\t\t}\n\n\t\t\t\t\tvar count = counts.get(event_name);\n\n\t\t\t\t\tif (count === undefined) {\n\t\t\t\t\t\tnode.addEventListener(event_name, handle_event_propagation, { passive });\n\t\t\t\t\t\tcounts.set(event_name, 1);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcounts.set(event_name, count + 1);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\n\t\tevent_handle(array_from(all_registered_events));\n\t\troot_event_handles.add(event_handle);\n\n\t\treturn () => {\n\t\t\tfor (var event_name of registered_events) {\n\t\t\t\tfor (const node of [target, document]) {\n\t\t\t\t\tvar counts = /** @type {Map} */ (listeners.get(node));\n\t\t\t\t\tvar count = /** @type {number} */ (counts.get(event_name));\n\n\t\t\t\t\tif (--count == 0) {\n\t\t\t\t\t\tnode.removeEventListener(event_name, handle_event_propagation);\n\t\t\t\t\t\tcounts.delete(event_name);\n\n\t\t\t\t\t\tif (counts.size === 0) {\n\t\t\t\t\t\t\tlisteners.delete(node);\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcounts.set(event_name, count);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\troot_event_handles.delete(event_handle);\n\n\t\t\tif (anchor_node !== anchor) {\n\t\t\t\tanchor_node.parentNode?.removeChild(anchor_node);\n\t\t\t}\n\t\t};\n\t});\n\n\tmounted_components.set(component, unmount);\n\treturn component;\n}\n\n/**\n * References of the components that were mounted or hydrated.\n * Uses a `WeakMap` to avoid memory leaks.\n */\nlet mounted_components = new WeakMap();\n\n/**\n * Unmounts a component that was previously mounted using `mount` or `hydrate`.\n *\n * Since 5.13.0, if `options.outro` is `true`, [transitions](https://svelte.dev/docs/svelte/transition) will play before the component is removed from the DOM.\n *\n * Returns a `Promise` that resolves after transitions have completed if `options.outro` is true, or immediately otherwise (prior to 5.13.0, returns `void`).\n *\n * ```js\n * import { mount, unmount } from 'svelte';\n * import App from './App.svelte';\n *\n * const app = mount(App, { target: document.body });\n *\n * // later...\n * unmount(app, { outro: true });\n * ```\n * @param {Record} component\n * @param {{ outro?: boolean }} [options]\n * @returns {Promise}\n */\nexport function unmount(component, options) {\n\tconst fn = mounted_components.get(component);\n\n\tif (fn) {\n\t\tmounted_components.delete(component);\n\t\treturn fn(options);\n\t}\n\n\tif (DEV) {\n\t\tif (STATE_SYMBOL in component) {\n\t\t\tw.state_proxy_unmount();\n\t\t} else {\n\t\t\tw.lifecycle_double_unmount();\n\t\t}\n\t}\n\n\treturn Promise.resolve();\n}\n","/** @import { Effect, TemplateNode } from '#client' */\nimport { Batch, current_batch } from '../../reactivity/batch.js';\nimport {\n\tbranch,\n\tdestroy_effect,\n\tmove_effect,\n\tpause_effect,\n\tresume_effect\n} from '../../reactivity/effects.js';\nimport { HMR_ANCHOR } from '../../constants.js';\nimport { hydrate_node, hydrating } from '../hydration.js';\nimport { create_text, should_defer_append } from '../operations.js';\nimport { DEV } from 'esm-env';\n\n/**\n * @typedef {{ effect: Effect, fragment: DocumentFragment }} Branch\n */\n\n/**\n * @template Key\n */\nexport class BranchManager {\n\t/** @type {TemplateNode} */\n\tanchor;\n\n\t/** @type {Map} */\n\t#batches = new Map();\n\n\t/**\n\t * Map of keys to effects that are currently rendered in the 
DOM.\n\t * These effects are visible and actively part of the document tree.\n\t * Example:\n\t * ```\n\t * {#if condition}\n\t * \tfoo\n\t * {:else}\n\t * \tbar\n\t * {/if}\n\t * ```\n\t * Can result in the entries `true->Effect` and `false->Effect`\n\t * @type {Map}\n\t */\n\t#onscreen = new Map();\n\n\t/**\n\t * Similar to #onscreen with respect to the keys, but contains branches that are not yet\n\t * in the DOM, because their insertion is deferred.\n\t * @type {Map}\n\t */\n\t#offscreen = new Map();\n\n\t/**\n\t * Keys of effects that are currently outroing\n\t * @type {Set}\n\t */\n\t#outroing = new Set();\n\n\t/**\n\t * Whether to pause (i.e. outro) on change, or destroy immediately.\n\t * This is necessary for ``\n\t */\n\t#transition = true;\n\n\t/**\n\t * @param {TemplateNode} anchor\n\t * @param {boolean} transition\n\t */\n\tconstructor(anchor, transition = true) {\n\t\tthis.anchor = anchor;\n\t\tthis.#transition = transition;\n\t}\n\n\t/**\n\t * @param {Batch} batch\n\t */\n\t#commit = (batch) => {\n\t\t// if this batch was made obsolete, bail\n\t\tif (!this.#batches.has(batch)) return;\n\n\t\tvar key = /** @type {Key} */ (this.#batches.get(batch));\n\n\t\tvar onscreen = this.#onscreen.get(key);\n\n\t\tif (onscreen) {\n\t\t\t// effect is already in the DOM — abort any current outro\n\t\t\tresume_effect(onscreen);\n\t\t\tthis.#outroing.delete(key);\n\t\t} else {\n\t\t\t// effect is currently offscreen. put it in the DOM\n\t\t\tvar offscreen = this.#offscreen.get(key);\n\n\t\t\tif (offscreen) {\n\t\t\t\tthis.#onscreen.set(key, offscreen.effect);\n\t\t\t\tthis.#offscreen.delete(key);\n\n\t\t\t\tif (DEV) {\n\t\t\t\t\t// Tell hmr.js about the anchor it should use for updates,\n\t\t\t\t\t// since the initial one will be removed\n\t\t\t\t\t/** @type {any} */ (offscreen.fragment.lastChild)[HMR_ANCHOR] = this.anchor;\n\t\t\t\t}\n\n\t\t\t\t// remove the anchor...\n\t\t\t\t/** @type {TemplateNode} */ (offscreen.fragment.lastChild).remove();\n\n\t\t\t\t// ...and append the fragment\n\t\t\t\tthis.anchor.before(offscreen.fragment);\n\t\t\t\tonscreen = offscreen.effect;\n\t\t\t}\n\t\t}\n\n\t\tfor (const [b, k] of this.#batches) {\n\t\t\tthis.#batches.delete(b);\n\n\t\t\tif (b === batch) {\n\t\t\t\t// keep values for newer batches\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tconst offscreen = this.#offscreen.get(k);\n\n\t\t\tif (offscreen) {\n\t\t\t\t// for older batches, destroy offscreen effects\n\t\t\t\t// as they will never be committed\n\t\t\t\tdestroy_effect(offscreen.effect);\n\t\t\t\tthis.#offscreen.delete(k);\n\t\t\t}\n\t\t}\n\n\t\t// outro/destroy all onscreen effects...\n\t\tfor (const [k, effect] of this.#onscreen) {\n\t\t\t// ...except the one that was just committed\n\t\t\t// or those that are already outroing (else the transition is aborted and the effect destroyed right away)\n\t\t\tif (k === key || this.#outroing.has(k)) continue;\n\n\t\t\tconst on_destroy = () => {\n\t\t\t\tconst keys = Array.from(this.#batches.values());\n\n\t\t\t\tif (keys.includes(k)) {\n\t\t\t\t\t// keep the effect offscreen, as another batch will need it\n\t\t\t\t\tvar fragment = document.createDocumentFragment();\n\t\t\t\t\tmove_effect(effect, fragment);\n\n\t\t\t\t\tfragment.append(create_text()); // TODO can we avoid this?\n\n\t\t\t\t\tthis.#offscreen.set(k, { effect, fragment });\n\t\t\t\t} else {\n\t\t\t\t\tdestroy_effect(effect);\n\t\t\t\t}\n\n\t\t\t\tthis.#outroing.delete(k);\n\t\t\t\tthis.#onscreen.delete(k);\n\t\t\t};\n\n\t\t\tif (this.#transition || !onscreen) 
{\n\t\t\t\tthis.#outroing.add(k);\n\t\t\t\tpause_effect(effect, on_destroy, false);\n\t\t\t} else {\n\t\t\t\ton_destroy();\n\t\t\t}\n\t\t}\n\t};\n\n\t/**\n\t * @param {Batch} batch\n\t */\n\t#discard = (batch) => {\n\t\tthis.#batches.delete(batch);\n\n\t\tconst keys = Array.from(this.#batches.values());\n\n\t\tfor (const [k, branch] of this.#offscreen) {\n\t\t\tif (!keys.includes(k)) {\n\t\t\t\tdestroy_effect(branch.effect);\n\t\t\t\tthis.#offscreen.delete(k);\n\t\t\t}\n\t\t}\n\t};\n\n\t/**\n\t *\n\t * @param {any} key\n\t * @param {null | ((target: TemplateNode) => void)} fn\n\t */\n\tensure(key, fn) {\n\t\tvar batch = /** @type {Batch} */ (current_batch);\n\t\tvar defer = should_defer_append();\n\n\t\tif (fn && !this.#onscreen.has(key) && !this.#offscreen.has(key)) {\n\t\t\tif (defer) {\n\t\t\t\tvar fragment = document.createDocumentFragment();\n\t\t\t\tvar target = create_text();\n\n\t\t\t\tfragment.append(target);\n\n\t\t\t\tthis.#offscreen.set(key, {\n\t\t\t\t\teffect: branch(() => fn(target)),\n\t\t\t\t\tfragment\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tthis.#onscreen.set(\n\t\t\t\t\tkey,\n\t\t\t\t\tbranch(() => fn(this.anchor))\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\tthis.#batches.set(batch, key);\n\n\t\tif (defer) {\n\t\t\tfor (const [k, effect] of this.#onscreen) {\n\t\t\t\tif (k === key) {\n\t\t\t\t\tbatch.unskip_effect(effect);\n\t\t\t\t} else {\n\t\t\t\t\tbatch.skip_effect(effect);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor (const [k, branch] of this.#offscreen) {\n\t\t\t\tif (k === key) {\n\t\t\t\t\tbatch.unskip_effect(branch.effect);\n\t\t\t\t} else {\n\t\t\t\t\tbatch.skip_effect(branch.effect);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbatch.oncommit(this.#commit);\n\t\t\tbatch.ondiscard(this.#discard);\n\t\t} else {\n\t\t\tif (hydrating) {\n\t\t\t\tthis.anchor = hydrate_node;\n\t\t\t}\n\n\t\t\tthis.#commit(batch);\n\t\t}\n\t}\n}\n","/** @import { ComponentContext, ComponentContextLegacy } from '#client' */\n/** @import { EventDispatcher } from './index.js' */\n/** @import { NotFunction } from './internal/types.js' */\nimport { active_reaction, untrack } from './internal/client/runtime.js';\nimport { is_array } from './internal/shared/utils.js';\nimport { user_effect } from './internal/client/index.js';\nimport * as e from './internal/client/errors.js';\nimport { legacy_mode_flag } from './internal/flags/index.js';\nimport { component_context } from './internal/client/context.js';\nimport { DEV } from 'esm-env';\n\nif (DEV) {\n\t/**\n\t * @param {string} rune\n\t */\n\tfunction throw_rune_error(rune) {\n\t\tif (!(rune in globalThis)) {\n\t\t\t// TODO if people start adjusting the \"this can contain runes\" config through v-p-s more, adjust this message\n\t\t\t/** @type {any} */\n\t\t\tlet value; // let's hope noone modifies this global, but belts and braces\n\t\t\tObject.defineProperty(globalThis, rune, {\n\t\t\t\tconfigurable: true,\n\t\t\t\t// eslint-disable-next-line getter-return\n\t\t\t\tget: () => {\n\t\t\t\t\tif (value !== undefined) {\n\t\t\t\t\t\treturn value;\n\t\t\t\t\t}\n\n\t\t\t\t\te.rune_outside_svelte(rune);\n\t\t\t\t},\n\t\t\t\tset: (v) => {\n\t\t\t\t\tvalue = v;\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t}\n\n\tthrow_rune_error('$state');\n\tthrow_rune_error('$effect');\n\tthrow_rune_error('$derived');\n\tthrow_rune_error('$inspect');\n\tthrow_rune_error('$props');\n\tthrow_rune_error('$bindable');\n}\n\n/**\n * Returns an [`AbortSignal`](https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal) that aborts when the current [derived](https://svelte.dev/docs/svelte/$derived) or 
[effect](https://svelte.dev/docs/svelte/$effect) re-runs or is destroyed.\n *\n * Must be called while a derived or effect is running.\n *\n * ```svelte\n * \n * ```\n */\nexport function getAbortSignal() {\n\tif (active_reaction === null) {\n\t\te.get_abort_signal_outside_reaction();\n\t}\n\n\treturn (active_reaction.ac ??= new AbortController()).signal;\n}\n\n/**\n * `onMount`, like [`$effect`](https://svelte.dev/docs/svelte/$effect), schedules a function to run as soon as the component has been mounted to the DOM.\n * Unlike `$effect`, the provided function only runs once.\n *\n * It must be called during the component's initialisation (but doesn't need to live _inside_ the component;\n * it can be called from an external module). If a function is returned _synchronously_ from `onMount`,\n * it will be called when the component is unmounted.\n *\n * `onMount` functions do not run during [server-side rendering](https://svelte.dev/docs/svelte/svelte-server#render).\n *\n * @template T\n * @param {() => NotFunction | Promise> | (() => any)} fn\n * @returns {void}\n */\nexport function onMount(fn) {\n\tif (component_context === null) {\n\t\te.lifecycle_outside_component('onMount');\n\t}\n\n\tif (legacy_mode_flag && component_context.l !== null) {\n\t\tinit_update_callbacks(component_context).m.push(fn);\n\t} else {\n\t\tuser_effect(() => {\n\t\t\tconst cleanup = untrack(fn);\n\t\t\tif (typeof cleanup === 'function') return /** @type {() => void} */ (cleanup);\n\t\t});\n\t}\n}\n\n/**\n * Schedules a callback to run immediately before the component is unmounted.\n *\n * Out of `onMount`, `beforeUpdate`, `afterUpdate` and `onDestroy`, this is the\n * only one that runs inside a server-side component.\n *\n * @param {() => any} fn\n * @returns {void}\n */\nexport function onDestroy(fn) {\n\tif (component_context === null) {\n\t\te.lifecycle_outside_component('onDestroy');\n\t}\n\n\tonMount(() => () => untrack(fn));\n}\n\n/**\n * @template [T=any]\n * @param {string} type\n * @param {T} [detail]\n * @param {any}params_0\n * @returns {CustomEvent}\n */\nfunction create_custom_event(type, detail, { bubbles = false, cancelable = false } = {}) {\n\treturn new CustomEvent(type, { detail, bubbles, cancelable });\n}\n\n/**\n * Creates an event dispatcher that can be used to dispatch [component events](https://svelte.dev/docs/svelte/legacy-on#Component-events).\n * Event dispatchers are functions that can take two arguments: `name` and `detail`.\n *\n * Component events created with `createEventDispatcher` create a\n * [CustomEvent](https://developer.mozilla.org/en-US/docs/Web/API/CustomEvent).\n * These events do not [bubble](https://developer.mozilla.org/en-US/docs/Learn/JavaScript/Building_blocks/Events#Event_bubbling_and_capture).\n * The `detail` argument corresponds to the [CustomEvent.detail](https://developer.mozilla.org/en-US/docs/Web/API/CustomEvent/detail)\n * property and can contain any type of data.\n *\n * The event dispatcher can be typed to narrow the allowed event names and the type of the `detail` argument:\n * ```ts\n * const dispatch = createEventDispatcher<{\n * loaded: null; // does not take a detail argument\n * change: string; // takes a detail argument of type string, which is required\n * optional: number | null; // takes an optional detail argument of type number\n * }>();\n * ```\n *\n * @deprecated Use callback props and/or the `$host()` rune instead — see [migration guide](https://svelte.dev/docs/svelte/v5-migration-guide#Event-changes-Component-events)\n * @template 
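// --- Editor's sketch (not part of this module) -------------------------------
// A plausible reconstruction of the emptied svelte snippet in the
// getAbortSignal JSDoc above, based on its doc text; `id` is an assumed
// reactive value in scope.
import { getAbortSignal } from 'svelte';

$effect(() => {
	// aborted automatically when the effect re-runs or is destroyed
	fetch(`/api/items/${id}`, { signal: getAbortSignal() })
		.then((response) => response.json())
		.then((data) => console.log(data));
});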
{Record} [EventMap = any]\n * @returns {EventDispatcher}\n */\nexport function createEventDispatcher() {\n\tconst active_component_context = component_context;\n\tif (active_component_context === null) {\n\t\te.lifecycle_outside_component('createEventDispatcher');\n\t}\n\n\t/**\n\t * @param [detail]\n\t * @param [options]\n\t */\n\treturn (type, detail, options) => {\n\t\tconst events = /** @type {Record} */ (\n\t\t\tactive_component_context.s.$$events\n\t\t)?.[/** @type {string} */ (type)];\n\n\t\tif (events) {\n\t\t\tconst callbacks = is_array(events) ? events.slice() : [events];\n\t\t\t// TODO are there situations where events could be dispatched\n\t\t\t// in a server (non-DOM) environment?\n\t\t\tconst event = create_custom_event(/** @type {string} */ (type), detail, options);\n\t\t\tfor (const fn of callbacks) {\n\t\t\t\tfn.call(active_component_context.x, event);\n\t\t\t}\n\t\t\treturn !event.defaultPrevented;\n\t\t}\n\n\t\treturn true;\n\t};\n}\n\n// TODO mark beforeUpdate and afterUpdate as deprecated in Svelte 6\n\n/**\n * Schedules a callback to run immediately before the component is updated after any state change.\n *\n * The first time the callback runs will be before the initial `onMount`.\n *\n * In runes mode use `$effect.pre` instead.\n *\n * @deprecated Use [`$effect.pre`](https://svelte.dev/docs/svelte/$effect#$effect.pre) instead\n * @param {() => void} fn\n * @returns {void}\n */\nexport function beforeUpdate(fn) {\n\tif (component_context === null) {\n\t\te.lifecycle_outside_component('beforeUpdate');\n\t}\n\n\tif (component_context.l === null) {\n\t\te.lifecycle_legacy_only('beforeUpdate');\n\t}\n\n\tinit_update_callbacks(component_context).b.push(fn);\n}\n\n/**\n * Schedules a callback to run immediately after the component has been updated.\n *\n * The first time the callback runs will be after the initial `onMount`.\n *\n * In runes mode use `$effect` instead.\n *\n * @deprecated Use [`$effect`](https://svelte.dev/docs/svelte/$effect) instead\n * @param {() => void} fn\n * @returns {void}\n */\nexport function afterUpdate(fn) {\n\tif (component_context === null) {\n\t\te.lifecycle_outside_component('afterUpdate');\n\t}\n\n\tif (component_context.l === null) {\n\t\te.lifecycle_legacy_only('afterUpdate');\n\t}\n\n\tinit_update_callbacks(component_context).a.push(fn);\n}\n\n/**\n * Legacy-mode: Init callbacks object for onMount/beforeUpdate/afterUpdate\n * @param {ComponentContext} context\n */\nfunction init_update_callbacks(context) {\n\tvar l = /** @type {ComponentContextLegacy} */ (context).l;\n\treturn (l.u ??= { a: [], b: [], m: [] });\n}\n\nexport { flushSync, fork } from './internal/client/reactivity/batch.js';\nexport {\n\tcreateContext,\n\tgetContext,\n\tgetAllContexts,\n\thasContext,\n\tsetContext\n} from './internal/client/context.js';\nexport { hydratable } from './internal/client/hydratable.js';\nexport { hydrate, mount, unmount } from './internal/client/render.js';\nexport { tick, untrack, settled } from './internal/client/runtime.js';\nexport { createRawSnippet } from './internal/client/dom/blocks/snippet.js';\n","/** @import { TemplateNode } from '#client' */\nimport { EFFECT_TRANSPARENT } from '#client/constants';\nimport {\n\thydrate_next,\n\thydrating,\n\tread_hydration_instruction,\n\tskip_nodes,\n\tset_hydrate_node,\n\tset_hydrating,\n\thydrate_node\n} from '../hydration.js';\nimport { block } from '../../reactivity/effects.js';\nimport { BranchManager } from './branches.js';\n\n/**\n * @param {TemplateNode} node\n * @param {(branch: (fn: 
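// --- Editor's sketch (not part of this module) -------------------------------
// Using the (deprecated) dispatcher defined above. Per the implementation, the
// return value is false only when a listener called preventDefault() on a
// cancelable event; it is true otherwise, including when nobody listens.
import { createEventDispatcher } from 'svelte';

const dispatch = createEventDispatcher();
const not_prevented = dispatch('change', 'new value', { cancelable: true });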
(anchor: Node) => void, key?: number | false) => void) => void} fn\n * @param {boolean} [elseif] True if this is an `{:else if ...}` block rather than an `{#if ...}`, as that affects which transitions are considered 'local'\n * @returns {void}\n */\nexport function if_block(node, fn, elseif = false) {\n\t/** @type {TemplateNode | undefined} */\n\tvar marker;\n\tif (hydrating) {\n\t\tmarker = hydrate_node;\n\t\thydrate_next();\n\t}\n\n\tvar branches = new BranchManager(node);\n\tvar flags = elseif ? EFFECT_TRANSPARENT : 0;\n\n\t/**\n\t * @param {number | false} key\n\t * @param {null | ((anchor: Node) => void)} fn\n\t */\n\tfunction update_branch(key, fn) {\n\t\tif (hydrating) {\n\t\t\tvar data = read_hydration_instruction(/** @type {TemplateNode} */ (marker));\n\n\t\t\t// \"[n\" = branch n, \"[-1\" = else\n\t\t\tif (key !== parseInt(data.substring(1))) {\n\t\t\t\t// Hydration mismatch: remove everything inside the anchor and start fresh.\n\t\t\t\t// This could happen with `{#if browser}...{/if}`, for example\n\t\t\t\tvar anchor = skip_nodes();\n\n\t\t\t\tset_hydrate_node(anchor);\n\t\t\t\tbranches.anchor = anchor;\n\n\t\t\t\tset_hydrating(false);\n\t\t\t\tbranches.ensure(key, fn);\n\t\t\t\tset_hydrating(true);\n\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\n\t\tbranches.ensure(key, fn);\n\t}\n\n\tblock(() => {\n\t\tvar has_branch = false;\n\n\t\tfn((fn, key = 0) => {\n\t\t\thas_branch = true;\n\t\t\tupdate_branch(key, fn);\n\t\t});\n\n\t\tif (!has_branch) {\n\t\t\tupdate_branch(-1, null);\n\t\t}\n\t}, flags);\n}\n","/** @import { EachItem, EachOutroGroup, EachState, Effect, EffectNodes, MaybeSource, Source, TemplateNode, TransitionManager, Value } from '#client' */\n/** @import { Batch } from '../../reactivity/batch.js'; */\nimport {\n\tEACH_INDEX_REACTIVE,\n\tEACH_IS_ANIMATED,\n\tEACH_IS_CONTROLLED,\n\tEACH_ITEM_IMMUTABLE,\n\tEACH_ITEM_REACTIVE,\n\tHYDRATION_END,\n\tHYDRATION_START_ELSE\n} from '../../../../constants.js';\nimport {\n\thydrate_next,\n\thydrate_node,\n\thydrating,\n\tread_hydration_instruction,\n\tskip_nodes,\n\tset_hydrate_node,\n\tset_hydrating\n} from '../hydration.js';\nimport {\n\tclear_text_content,\n\tcreate_text,\n\tget_first_child,\n\tget_next_sibling,\n\tshould_defer_append\n} from '../operations.js';\nimport {\n\tblock,\n\tbranch,\n\tdestroy_effect,\n\tmove_effect,\n\tpause_effect,\n\tresume_effect\n} from '../../reactivity/effects.js';\nimport { source, mutable_source, internal_set } from '../../reactivity/sources.js';\nimport { array_from, is_array } from '../../../shared/utils.js';\nimport { BRANCH_EFFECT, COMMENT_NODE, DESTROYED, EFFECT_OFFSCREEN, INERT } from '#client/constants';\nimport { queue_micro_task } from '../task.js';\nimport { get } from '../../runtime.js';\nimport { DEV } from 'esm-env';\nimport { derived_safe_equal } from '../../reactivity/deriveds.js';\nimport { current_batch } from '../../reactivity/batch.js';\nimport * as e from '../../errors.js';\nimport { tag } from '../../dev/tracing.js';\n\n// When making substantive changes to this file, validate them with the each block stress test:\n// https://svelte.dev/playground/1972b2cf46564476ad8c8c6405b23b7b\n// This test also exists in this repo, as `packages/svelte/tests/manual/each-stress-test`\n\n/**\n * @param {any} _\n * @param {number} i\n */\nexport function index(_, i) {\n\treturn i;\n}\n\n/**\n * Pause multiple effects simultaneously, and coordinate their\n * subsequent destruction. 
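// --- Editor's sketch (not part of this module) -------------------------------
// Assumed shape of the compiled output driving if_block above (hypothetical
// helpers, not verbatim compiler output): the callback reports the active
// branch with a numeric key, and making no call at all selects the -1
// (empty) branch via update_branch(-1, null).
if_block(anchor, (branch) => {
	if (get_condition()) {
		branch((anchor) => render_consequent(anchor), 0);
	} else {
		branch((anchor) => render_alternate(anchor), 1);
	}
});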
Used in each blocks\n * @param {EachState} state\n * @param {Effect[]} to_destroy\n * @param {null | Node} controlled_anchor\n */\nfunction pause_effects(state, to_destroy, controlled_anchor) {\n\t/** @type {TransitionManager[]} */\n\tvar transitions = [];\n\tvar length = to_destroy.length;\n\n\t/** @type {EachOutroGroup} */\n\tvar group;\n\tvar remaining = to_destroy.length;\n\n\tfor (var i = 0; i < length; i++) {\n\t\tlet effect = to_destroy[i];\n\n\t\tpause_effect(\n\t\t\teffect,\n\t\t\t() => {\n\t\t\t\tif (group) {\n\t\t\t\t\tgroup.pending.delete(effect);\n\t\t\t\t\tgroup.done.add(effect);\n\n\t\t\t\t\tif (group.pending.size === 0) {\n\t\t\t\t\t\tvar groups = /** @type {Set} */ (state.outrogroups);\n\n\t\t\t\t\t\tdestroy_effects(state, array_from(group.done));\n\t\t\t\t\t\tgroups.delete(group);\n\n\t\t\t\t\t\tif (groups.size === 0) {\n\t\t\t\t\t\t\tstate.outrogroups = null;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tremaining -= 1;\n\t\t\t\t}\n\t\t\t},\n\t\t\tfalse\n\t\t);\n\t}\n\n\tif (remaining === 0) {\n\t\t// If we're in a controlled each block (i.e. the block is the only child of an\n\t\t// element), and we are removing all items, _and_ there are no out transitions,\n\t\t// we can use the fast path — emptying the element and replacing the anchor\n\t\tvar fast_path = transitions.length === 0 && controlled_anchor !== null;\n\n\t\tif (fast_path) {\n\t\t\tvar anchor = /** @type {Element} */ (controlled_anchor);\n\t\t\tvar parent_node = /** @type {Element} */ (anchor.parentNode);\n\n\t\t\tclear_text_content(parent_node);\n\t\t\tparent_node.append(anchor);\n\n\t\t\tstate.items.clear();\n\t\t}\n\n\t\tdestroy_effects(state, to_destroy, !fast_path);\n\t} else {\n\t\tgroup = {\n\t\t\tpending: new Set(to_destroy),\n\t\t\tdone: new Set()\n\t\t};\n\n\t\t(state.outrogroups ??= new Set()).add(group);\n\t}\n}\n\n/**\n * @param {EachState} state\n * @param {Effect[]} to_destroy\n * @param {boolean} remove_dom\n */\nfunction destroy_effects(state, to_destroy, remove_dom = true) {\n\t/** @type {Set | undefined} */\n\tvar preserved_effects;\n\n\t// The loop-in-a-loop isn't ideal, but we should only hit this in relatively rare cases\n\tif (state.pending.size > 0) {\n\t\tpreserved_effects = new Set();\n\n\t\tfor (const keys of state.pending.values()) {\n\t\t\tfor (const key of keys) {\n\t\t\t\tpreserved_effects.add(/** @type {EachItem} */ (state.items.get(key)).e);\n\t\t\t}\n\t\t}\n\t}\n\n\tfor (var i = 0; i < to_destroy.length; i++) {\n\t\tvar e = to_destroy[i];\n\n\t\tif (preserved_effects?.has(e)) {\n\t\t\te.f |= EFFECT_OFFSCREEN;\n\n\t\t\tconst fragment = document.createDocumentFragment();\n\t\t\tmove_effect(e, fragment);\n\t\t} else {\n\t\t\tdestroy_effect(to_destroy[i], remove_dom);\n\t\t}\n\t}\n}\n\n/** @type {TemplateNode} */\nvar offscreen_anchor;\n\n/**\n * @template V\n * @param {Element | Comment} node The next sibling node, or the parent node if this is a 'controlled' block\n * @param {number} flags\n * @param {() => V[]} get_collection\n * @param {(value: V, index: number) => any} get_key\n * @param {(anchor: Node, item: MaybeSource, index: MaybeSource) => void} render_fn\n * @param {null | ((anchor: Node) => void)} fallback_fn\n * @returns {void}\n */\nexport function each(node, flags, get_collection, get_key, render_fn, fallback_fn = null) {\n\tvar anchor = node;\n\n\t/** @type {Map} */\n\tvar items = new Map();\n\n\tvar is_controlled = (flags & EACH_IS_CONTROLLED) !== 0;\n\n\tif (is_controlled) {\n\t\tvar parent_node = /** @type {Element} */ (node);\n\n\t\tanchor = 
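// Editor's note on pause_effects above: the fast path applies when a
// *controlled* each block (the block is the sole child of its parent element)
// removes every item with no outro transitions pending. In that case the
// parent is emptied wholesale via clear_text_content and the anchor
// re-appended, instead of tearing down the DOM one effect at a time.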
hydrating\n\t\t\t? set_hydrate_node(get_first_child(parent_node))\n\t\t\t: parent_node.appendChild(create_text());\n\t}\n\n\tif (hydrating) {\n\t\thydrate_next();\n\t}\n\n\t/** @type {Effect | null} */\n\tvar fallback = null;\n\n\t// TODO: ideally we could use derived for runes mode but because of the ability\n\t// to use a store which can be mutated, we can't do that here as mutating a store\n\t// will still result in the collection array being the same from the store\n\tvar each_array = derived_safe_equal(() => {\n\t\tvar collection = get_collection();\n\n\t\treturn is_array(collection) ? collection : collection == null ? [] : array_from(collection);\n\t});\n\n\tif (DEV) {\n\t\ttag(each_array, '{#each ...}');\n\t}\n\n\t/** @type {V[]} */\n\tvar array;\n\n\t/** @type {Map>} */\n\tvar pending = new Map();\n\n\tvar first_run = true;\n\n\t/**\n\t * @param {Batch} batch\n\t */\n\tfunction commit(batch) {\n\t\tif ((state.effect.f & DESTROYED) !== 0) {\n\t\t\treturn;\n\t\t}\n\n\t\tstate.pending.delete(batch);\n\n\t\tstate.fallback = fallback;\n\t\treconcile(state, array, anchor, flags, get_key);\n\n\t\tif (fallback !== null) {\n\t\t\tif (array.length === 0) {\n\t\t\t\tif ((fallback.f & EFFECT_OFFSCREEN) === 0) {\n\t\t\t\t\tresume_effect(fallback);\n\t\t\t\t} else {\n\t\t\t\t\tfallback.f ^= EFFECT_OFFSCREEN;\n\t\t\t\t\tmove(fallback, null, anchor);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tpause_effect(fallback, () => {\n\t\t\t\t\t// TODO only null out if no pending batch needs it,\n\t\t\t\t\t// otherwise re-add `fallback.fragment` and move the\n\t\t\t\t\t// effect into it\n\t\t\t\t\tfallback = null;\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\n\t * @param {Batch} batch\n\t */\n\tfunction discard(batch) {\n\t\tstate.pending.delete(batch);\n\t}\n\n\tvar effect = block(() => {\n\t\tarray = /** @type {V[]} */ (get(each_array));\n\t\tvar length = array.length;\n\n\t\t/** `true` if there was a hydration mismatch. Needs to be a `let` or else it isn't treeshaken out */\n\t\tlet mismatch = false;\n\n\t\tif (hydrating) {\n\t\t\tvar is_else = read_hydration_instruction(anchor) === HYDRATION_START_ELSE;\n\n\t\t\tif (is_else !== (length === 0)) {\n\t\t\t\t// hydration mismatch — remove the server-rendered DOM and start over\n\t\t\t\tanchor = skip_nodes();\n\n\t\t\t\tset_hydrate_node(anchor);\n\t\t\t\tset_hydrating(false);\n\t\t\t\tmismatch = true;\n\t\t\t}\n\t\t}\n\n\t\tvar keys = new Set();\n\t\tvar batch = /** @type {Batch} */ (current_batch);\n\t\tvar defer = should_defer_append();\n\n\t\tfor (var index = 0; index < length; index += 1) {\n\t\t\tif (\n\t\t\t\thydrating &&\n\t\t\t\thydrate_node.nodeType === COMMENT_NODE &&\n\t\t\t\t/** @type {Comment} */ (hydrate_node).data === HYDRATION_END\n\t\t\t) {\n\t\t\t\t// The server rendered fewer items than expected,\n\t\t\t\t// so break out and continue appending non-hydrated items\n\t\t\t\tanchor = /** @type {Comment} */ (hydrate_node);\n\t\t\t\tmismatch = true;\n\t\t\t\tset_hydrating(false);\n\t\t\t}\n\n\t\t\tvar value = array[index];\n\t\t\tvar key = get_key(value, index);\n\n\t\t\tif (DEV) {\n\t\t\t\t// Check that the key function is idempotent (returns the same value when called twice)\n\t\t\t\tvar key_again = get_key(value, index);\n\t\t\t\tif (key !== key_again) {\n\t\t\t\t\te.each_key_volatile(String(index), String(key), String(key_again));\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar item = first_run ? 
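// --- Editor's sketch (not part of this module) -------------------------------
// The DEV check above requires key functions to be idempotent: calling them
// twice with the same (value, index) must yield the same key.
const stable = (item) => item.id; //      fine
const volatile = () => Math.random(); //  fires each_key_volatile in DEV
// Keys must also be unique across the collection; [{ id: 1 }, { id: 1 }] with
// `stable` is reported via each_key_duplicate (see validate_each_keys below).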
null : items.get(key);\n\n\t\t\tif (item) {\n\t\t\t\t// update before reconciliation, to trigger any async updates\n\t\t\t\tif (item.v) internal_set(item.v, value);\n\t\t\t\tif (item.i) internal_set(item.i, index);\n\n\t\t\t\tif (defer) {\n\t\t\t\t\tbatch.unskip_effect(item.e);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\titem = create_item(\n\t\t\t\t\titems,\n\t\t\t\t\tfirst_run ? anchor : (offscreen_anchor ??= create_text()),\n\t\t\t\t\tvalue,\n\t\t\t\t\tkey,\n\t\t\t\t\tindex,\n\t\t\t\t\trender_fn,\n\t\t\t\t\tflags,\n\t\t\t\t\tget_collection\n\t\t\t\t);\n\n\t\t\t\tif (!first_run) {\n\t\t\t\t\titem.e.f |= EFFECT_OFFSCREEN;\n\t\t\t\t}\n\n\t\t\t\titems.set(key, item);\n\t\t\t}\n\n\t\t\tkeys.add(key);\n\t\t}\n\n\t\tif (length === 0 && fallback_fn && !fallback) {\n\t\t\tif (first_run) {\n\t\t\t\tfallback = branch(() => fallback_fn(anchor));\n\t\t\t} else {\n\t\t\t\tfallback = branch(() => fallback_fn((offscreen_anchor ??= create_text())));\n\t\t\t\tfallback.f |= EFFECT_OFFSCREEN;\n\t\t\t}\n\t\t}\n\n\t\tif (length > keys.size) {\n\t\t\tif (DEV) {\n\t\t\t\tvalidate_each_keys(array, get_key);\n\t\t\t} else {\n\t\t\t\t// in prod, the additional information isn't printed, so don't bother computing it\n\t\t\t\te.each_key_duplicate('', '', '');\n\t\t\t}\n\t\t}\n\n\t\t// remove excess nodes\n\t\tif (hydrating && length > 0) {\n\t\t\tset_hydrate_node(skip_nodes());\n\t\t}\n\n\t\tif (!first_run) {\n\t\t\tpending.set(batch, keys);\n\n\t\t\tif (defer) {\n\t\t\t\tfor (const [key, item] of items) {\n\t\t\t\t\tif (!keys.has(key)) {\n\t\t\t\t\t\tbatch.skip_effect(item.e);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tbatch.oncommit(commit);\n\t\t\t\tbatch.ondiscard(discard);\n\t\t\t} else {\n\t\t\t\tcommit(batch);\n\t\t\t}\n\t\t}\n\n\t\tif (mismatch) {\n\t\t\t// continue in hydration mode\n\t\t\tset_hydrating(true);\n\t\t}\n\n\t\t// When we mount the each block for the first time, the collection won't be\n\t\t// connected to this effect as the effect hasn't finished running yet and its deps\n\t\t// won't be assigned. 
However, it's possible that when reconciling the each block\n\t\t// that a mutation occurred and it's made the collection MAYBE_DIRTY, so reading the\n\t\t// collection again can provide consistency to the reactive graph again as the deriveds\n\t\t// will now be `CLEAN`.\n\t\tget(each_array);\n\t});\n\n\t/** @type {EachState} */\n\tvar state = { effect, flags, items, pending, outrogroups: null, fallback };\n\n\tfirst_run = false;\n\n\tif (hydrating) {\n\t\tanchor = hydrate_node;\n\t}\n}\n\n/**\n * Skip past any non-branch effects (which could be created with `createSubscriber`, for example) to find the next branch effect\n * @param {Effect | null} effect\n * @returns {Effect | null}\n */\nfunction skip_to_branch(effect) {\n\twhile (effect !== null && (effect.f & BRANCH_EFFECT) === 0) {\n\t\teffect = effect.next;\n\t}\n\treturn effect;\n}\n\n/**\n * Add, remove, or reorder items output by an each block as its input changes\n * @template V\n * @param {EachState} state\n * @param {Array} array\n * @param {Element | Comment | Text} anchor\n * @param {number} flags\n * @param {(value: V, index: number) => any} get_key\n * @returns {void}\n */\nfunction reconcile(state, array, anchor, flags, get_key) {\n\tvar is_animated = (flags & EACH_IS_ANIMATED) !== 0;\n\n\tvar length = array.length;\n\tvar items = state.items;\n\tvar current = skip_to_branch(state.effect.first);\n\n\t/** @type {undefined | Set} */\n\tvar seen;\n\n\t/** @type {Effect | null} */\n\tvar prev = null;\n\n\t/** @type {undefined | Set} */\n\tvar to_animate;\n\n\t/** @type {Effect[]} */\n\tvar matched = [];\n\n\t/** @type {Effect[]} */\n\tvar stashed = [];\n\n\t/** @type {V} */\n\tvar value;\n\n\t/** @type {any} */\n\tvar key;\n\n\t/** @type {Effect | undefined} */\n\tvar effect;\n\n\t/** @type {number} */\n\tvar i;\n\n\tif (is_animated) {\n\t\tfor (i = 0; i < length; i += 1) {\n\t\t\tvalue = array[i];\n\t\t\tkey = get_key(value, i);\n\t\t\teffect = /** @type {EachItem} */ (items.get(key)).e;\n\n\t\t\t// offscreen == coming in now, no animation in that case,\n\t\t\t// else this would happen https://github.com/sveltejs/svelte/issues/17181\n\t\t\tif ((effect.f & EFFECT_OFFSCREEN) === 0) {\n\t\t\t\teffect.nodes?.a?.measure();\n\t\t\t\t(to_animate ??= new Set()).add(effect);\n\t\t\t}\n\t\t}\n\t}\n\n\tfor (i = 0; i < length; i += 1) {\n\t\tvalue = array[i];\n\t\tkey = get_key(value, i);\n\n\t\teffect = /** @type {EachItem} */ (items.get(key)).e;\n\n\t\tif (state.outrogroups !== null) {\n\t\t\tfor (const group of state.outrogroups) {\n\t\t\t\tgroup.pending.delete(effect);\n\t\t\t\tgroup.done.delete(effect);\n\t\t\t}\n\t\t}\n\n\t\tif ((effect.f & INERT) !== 0) {\n\t\t\tresume_effect(effect);\n\t\t\tif (is_animated) {\n\t\t\t\teffect.nodes?.a?.unfix();\n\t\t\t\t(to_animate ??= new Set()).delete(effect);\n\t\t\t}\n\t\t}\n\n\t\tif ((effect.f & EFFECT_OFFSCREEN) !== 0) {\n\t\t\teffect.f ^= EFFECT_OFFSCREEN;\n\n\t\t\tif (effect === current) {\n\t\t\t\tmove(effect, null, anchor);\n\t\t\t} else {\n\t\t\t\tvar next = prev ? 
prev.next : current;\n\n\t\t\t\tif (effect === state.effect.last) {\n\t\t\t\t\tstate.effect.last = effect.prev;\n\t\t\t\t}\n\n\t\t\t\tif (effect.prev) effect.prev.next = effect.next;\n\t\t\t\tif (effect.next) effect.next.prev = effect.prev;\n\t\t\t\tlink(state, prev, effect);\n\t\t\t\tlink(state, effect, next);\n\n\t\t\t\tmove(effect, next, anchor);\n\t\t\t\tprev = effect;\n\n\t\t\t\tmatched = [];\n\t\t\t\tstashed = [];\n\n\t\t\t\tcurrent = skip_to_branch(prev.next);\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t}\n\n\t\tif (effect !== current) {\n\t\t\tif (seen !== undefined && seen.has(effect)) {\n\t\t\t\tif (matched.length < stashed.length) {\n\t\t\t\t\t// more efficient to move later items to the front\n\t\t\t\t\tvar start = stashed[0];\n\t\t\t\t\tvar j;\n\n\t\t\t\t\tprev = start.prev;\n\n\t\t\t\t\tvar a = matched[0];\n\t\t\t\t\tvar b = matched[matched.length - 1];\n\n\t\t\t\t\tfor (j = 0; j < matched.length; j += 1) {\n\t\t\t\t\t\tmove(matched[j], start, anchor);\n\t\t\t\t\t}\n\n\t\t\t\t\tfor (j = 0; j < stashed.length; j += 1) {\n\t\t\t\t\t\tseen.delete(stashed[j]);\n\t\t\t\t\t}\n\n\t\t\t\t\tlink(state, a.prev, b.next);\n\t\t\t\t\tlink(state, prev, a);\n\t\t\t\t\tlink(state, b, start);\n\n\t\t\t\t\tcurrent = start;\n\t\t\t\t\tprev = b;\n\t\t\t\t\ti -= 1;\n\n\t\t\t\t\tmatched = [];\n\t\t\t\t\tstashed = [];\n\t\t\t\t} else {\n\t\t\t\t\t// more efficient to move earlier items to the back\n\t\t\t\t\tseen.delete(effect);\n\t\t\t\t\tmove(effect, current, anchor);\n\n\t\t\t\t\tlink(state, effect.prev, effect.next);\n\t\t\t\t\tlink(state, effect, prev === null ? state.effect.first : prev.next);\n\t\t\t\t\tlink(state, prev, effect);\n\n\t\t\t\t\tprev = effect;\n\t\t\t\t}\n\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tmatched = [];\n\t\t\tstashed = [];\n\n\t\t\twhile (current !== null && current !== effect) {\n\t\t\t\t(seen ??= new Set()).add(current);\n\t\t\t\tstashed.push(current);\n\t\t\t\tcurrent = skip_to_branch(current.next);\n\t\t\t}\n\n\t\t\tif (current === null) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t}\n\n\t\tif ((effect.f & EFFECT_OFFSCREEN) === 0) {\n\t\t\tmatched.push(effect);\n\t\t}\n\n\t\tprev = effect;\n\t\tcurrent = skip_to_branch(effect.next);\n\t}\n\n\tif (state.outrogroups !== null) {\n\t\tfor (const group of state.outrogroups) {\n\t\t\tif (group.pending.size === 0) {\n\t\t\t\tdestroy_effects(state, array_from(group.done));\n\t\t\t\tstate.outrogroups?.delete(group);\n\t\t\t}\n\t\t}\n\n\t\tif (state.outrogroups.size === 0) {\n\t\t\tstate.outrogroups = null;\n\t\t}\n\t}\n\n\tif (current !== null || seen !== undefined) {\n\t\t/** @type {Effect[]} */\n\t\tvar to_destroy = [];\n\n\t\tif (seen !== undefined) {\n\t\t\tfor (effect of seen) {\n\t\t\t\tif ((effect.f & INERT) === 0) {\n\t\t\t\t\tto_destroy.push(effect);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\twhile (current !== null) {\n\t\t\t// If the each block isn't inert, then inert effects are currently outroing and will be removed once the transition is finished\n\t\t\tif ((current.f & INERT) === 0 && current !== state.fallback) {\n\t\t\t\tto_destroy.push(current);\n\t\t\t}\n\n\t\t\tcurrent = skip_to_branch(current.next);\n\t\t}\n\n\t\tvar destroy_length = to_destroy.length;\n\n\t\tif (destroy_length > 0) {\n\t\t\tvar controlled_anchor = (flags & EACH_IS_CONTROLLED) !== 0 && length === 0 ? 
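// Editor's note on the matched/stashed bookkeeping above: when a keyed item
// turns up out of order, reconcile() moves whichever group is smaller. Going
// from [a, b, c, d] to [d, a, b, c], moving `d` to the front costs one move,
// while moving a, b and c behind it costs three; comparing matched.length
// against stashed.length picks the cheaper side.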
anchor : null;\n\n\t\t\tif (is_animated) {\n\t\t\t\tfor (i = 0; i < destroy_length; i += 1) {\n\t\t\t\t\tto_destroy[i].nodes?.a?.measure();\n\t\t\t\t}\n\n\t\t\t\tfor (i = 0; i < destroy_length; i += 1) {\n\t\t\t\t\tto_destroy[i].nodes?.a?.fix();\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tpause_effects(state, to_destroy, controlled_anchor);\n\t\t}\n\t}\n\n\tif (is_animated) {\n\t\tqueue_micro_task(() => {\n\t\t\tif (to_animate === undefined) return;\n\t\t\tfor (effect of to_animate) {\n\t\t\t\teffect.nodes?.a?.apply();\n\t\t\t}\n\t\t});\n\t}\n}\n\n/**\n * @template V\n * @param {Map} items\n * @param {Node} anchor\n * @param {V} value\n * @param {unknown} key\n * @param {number} index\n * @param {(anchor: Node, item: V | Source, index: number | Value, collection: () => V[]) => void} render_fn\n * @param {number} flags\n * @param {() => V[]} get_collection\n * @returns {EachItem}\n */\nfunction create_item(items, anchor, value, key, index, render_fn, flags, get_collection) {\n\tvar v =\n\t\t(flags & EACH_ITEM_REACTIVE) !== 0\n\t\t\t? (flags & EACH_ITEM_IMMUTABLE) === 0\n\t\t\t\t? mutable_source(value, false, false)\n\t\t\t\t: source(value)\n\t\t\t: null;\n\n\tvar i = (flags & EACH_INDEX_REACTIVE) !== 0 ? source(index) : null;\n\n\tif (DEV && v) {\n\t\t// For tracing purposes, we need to link the source signal we create with the\n\t\t// collection + index so that tracing works as intended\n\t\tv.trace = () => {\n\t\t\t// eslint-disable-next-line @typescript-eslint/no-unused-expressions\n\t\t\tget_collection()[i?.v ?? index];\n\t\t};\n\t}\n\n\treturn {\n\t\tv,\n\t\ti,\n\t\te: branch(() => {\n\t\t\trender_fn(anchor, v ?? value, i ?? index, get_collection);\n\n\t\t\treturn () => {\n\t\t\t\titems.delete(key);\n\t\t\t};\n\t\t})\n\t};\n}\n\n/**\n * @param {Effect} effect\n * @param {Effect | null} next\n * @param {Text | Element | Comment} anchor\n */\nfunction move(effect, next, anchor) {\n\tif (!effect.nodes) return;\n\n\tvar node = effect.nodes.start;\n\tvar end = effect.nodes.end;\n\n\tvar dest =\n\t\tnext && (next.f & EFFECT_OFFSCREEN) === 0\n\t\t\t? 
/** @type {EffectNodes} */ (next.nodes).start\n\t\t\t: anchor;\n\n\twhile (node !== null) {\n\t\tvar next_node = /** @type {TemplateNode} */ (get_next_sibling(node));\n\t\tdest.before(node);\n\n\t\tif (node === end) {\n\t\t\treturn;\n\t\t}\n\n\t\tnode = next_node;\n\t}\n}\n\n/**\n * @param {EachState} state\n * @param {Effect | null} prev\n * @param {Effect | null} next\n */\nfunction link(state, prev, next) {\n\tif (prev === null) {\n\t\tstate.effect.first = next;\n\t} else {\n\t\tprev.next = next;\n\t}\n\n\tif (next === null) {\n\t\tstate.effect.last = prev;\n\t} else {\n\t\tnext.prev = prev;\n\t}\n}\n\n/**\n * @param {Array} array\n * @param {(item: any, index: number) => string} key_fn\n * @returns {void}\n */\nfunction validate_each_keys(array, key_fn) {\n\tconst keys = new Map();\n\tconst length = array.length;\n\n\tfor (let i = 0; i < length; i++) {\n\t\tconst key = key_fn(array[i], i);\n\n\t\tif (keys.has(key)) {\n\t\t\tconst a = String(keys.get(key));\n\t\t\tconst b = String(i);\n\n\t\t\t/** @type {string | null} */\n\t\t\tlet k = String(key);\n\t\t\tif (k.startsWith('[object ')) k = null;\n\n\t\t\te.each_key_duplicate(a, b, k);\n\t\t}\n\n\t\tkeys.set(key, i);\n\t}\n}\n","/** @import { Effect, TemplateNode } from '#client' */\n/** @import {} from 'trusted-types' */\nimport {\n\tFILENAME,\n\tHYDRATION_ERROR,\n\tNAMESPACE_SVG,\n\tNAMESPACE_MATHML\n} from '../../../../constants.js';\nimport { remove_effect_dom, template_effect } from '../../reactivity/effects.js';\nimport { hydrate_next, hydrate_node, hydrating, set_hydrate_node } from '../hydration.js';\n\nimport { assign_nodes } from '../template.js';\nimport * as w from '../../warnings.js';\nimport { hash, sanitize_location } from '../../../../utils.js';\nimport { DEV } from 'esm-env';\nimport { dev_current_component_function } from '../../context.js';\nimport { create_element, get_first_child, get_next_sibling } from '../operations.js';\nimport { active_effect } from '../../runtime.js';\nimport { COMMENT_NODE } from '#client/constants';\n\n/**\n * @param {Element} element\n * @param {string | null} server_hash\n * @param {string | TrustedHTML} value\n */\nfunction check_hash(element, server_hash, value) {\n\tif (!server_hash || server_hash === hash(String(value ?? ''))) return;\n\n\tlet location;\n\n\t// @ts-expect-error\n\tconst loc = element.__svelte_meta?.loc;\n\tif (loc) {\n\t\tlocation = `near ${loc.file}:${loc.line}:${loc.column}`;\n\t} else if (dev_current_component_function?.[FILENAME]) {\n\t\tlocation = `in ${dev_current_component_function[FILENAME]}`;\n\t}\n\n\tw.hydration_html_changed(sanitize_location(location));\n}\n\n/**\n * @param {Element | Text | Comment} node\n * @param {() => string | TrustedHTML} get_value\n * @param {boolean} [is_controlled]\n * @param {boolean} [svg]\n * @param {boolean} [mathml]\n * @param {boolean} [skip_warning]\n * @returns {void}\n */\nexport function html(\n\tnode,\n\tget_value,\n\tis_controlled = false,\n\tsvg = false,\n\tmathml = false,\n\tskip_warning = false\n) {\n\tvar anchor = node;\n\n\t/** @type {string | TrustedHTML} */\n\tvar value = '';\n\n\tif (is_controlled) {\n\t\tvar parent_node = /** @type {Element} */ (node);\n\n\t\tif (hydrating) {\n\t\t\tanchor = set_hydrate_node(get_first_child(parent_node));\n\t\t}\n\t}\n\n\ttemplate_effect(() => {\n\t\tvar effect = /** @type {Effect} */ (active_effect);\n\n\t\tif (value === (value = get_value() ?? 
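// --- Editor's sketch (not part of this module) -------------------------------
// validate_each_keys above, traced by hand (DEV only). Duplicate keys are
// reported with both indices; keys that stringify like '[object Object]' are
// elided to null before reporting:
//   validate_each_keys([{ id: 1 }, { id: 1 }], (item) => item.id)
//   -> e.each_key_duplicate('0', '1', '1')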
'')) {\n\t\t\tif (hydrating) hydrate_next();\n\t\t\treturn;\n\t\t}\n\n\t\tif (is_controlled && !hydrating) {\n\t\t\t// When @html is the only child, use innerHTML directly.\n\t\t\t// This also handles contenteditable, where the user may delete the anchor comment.\n\t\t\teffect.nodes = null;\n\t\t\tparent_node.innerHTML = /** @type {string} */ (value);\n\n\t\t\tif (value !== '') {\n\t\t\t\tassign_nodes(\n\t\t\t\t\t/** @type {TemplateNode} */ (get_first_child(parent_node)),\n\t\t\t\t\t/** @type {TemplateNode} */ (parent_node.lastChild)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\treturn;\n\t\t}\n\n\t\tif (effect.nodes !== null) {\n\t\t\tremove_effect_dom(effect.nodes.start, /** @type {TemplateNode} */ (effect.nodes.end));\n\t\t\teffect.nodes = null;\n\t\t}\n\n\t\tif (value === '') return;\n\n\t\tif (hydrating) {\n\t\t\t// We're deliberately not trying to repair mismatches between server and client,\n\t\t\t// as it's costly and error-prone (and it's an edge case to have a mismatch anyway)\n\t\t\tvar hash = /** @type {Comment} */ (hydrate_node).data;\n\n\t\t\t/** @type {TemplateNode | null} */\n\t\t\tvar next = hydrate_next();\n\t\t\tvar last = next;\n\n\t\t\twhile (\n\t\t\t\tnext !== null &&\n\t\t\t\t(next.nodeType !== COMMENT_NODE || /** @type {Comment} */ (next).data !== '')\n\t\t\t) {\n\t\t\t\tlast = next;\n\t\t\t\tnext = get_next_sibling(next);\n\t\t\t}\n\n\t\t\tif (next === null) {\n\t\t\t\tw.hydration_mismatch();\n\t\t\t\tthrow HYDRATION_ERROR;\n\t\t\t}\n\n\t\t\tif (DEV && !skip_warning) {\n\t\t\t\tcheck_hash(/** @type {Element} */ (next.parentNode), hash, value);\n\t\t\t}\n\n\t\t\tassign_nodes(hydrate_node, last);\n\t\t\tanchor = set_hydrate_node(next);\n\t\t\treturn;\n\t\t}\n\n\t\t// Don't use create_fragment_with_script_from_html here because that would mean script tags are executed.\n\t\t// @html is basically `.innerHTML = ...` and that doesn't execute scripts either due to security reasons.\n\t\t// Use a