Spaces:
Running
iana (#1)
Browse files- docs/handoff (2936d2efdffb9328f69a2e963b3ebd1de87e2449)
- plans/ (be602daa47c9f605d39659bc29515b59a433f59d)
- feat: implement origami RL environment (Phase 1) (d7f96cffadde2f76730c4871302da58301abd65f)
- feat: React observability dashboard + FastAPI server + matplotlib renderer (cecbed6224418454feaa683e9c5fcb12103b4ffd)
- feat: Python 3D origami mass-spring simulator (Ghassaei 2018) (94ab3fc15d67c0a7dcdc85153a42c9aa7bfa765a)
- Add 3D fold preview modes (e971f8f4c4e321dc962d15fd6801091d28396a6f)
- Add OpenEnv runtime adapter and server entrypoint (883cccb04ed467c34314981766ce0cb261a4fff4)
- Add OpenEnv manifest and deployment packaging (039a8a2b410b2a6511bad6d40885453da2eb3fa8)
- Add OpenEnv adapter contract tests (2f3409572f5262a20e9b67af66e8a469dfa923e4)
Co-authored-by: Iana Lin <ianalin123@users.noreply.huggingface.co>
- .gitignore +2 -0
- Dockerfile +12 -0
- docs/optigami_handoff.md +767 -0
- env/__init__.py +0 -0
- env/environment.py +243 -0
- env/graph.py +117 -0
- env/paper_state.py +150 -0
- env/prompts.py +235 -0
- env/rewards.py +93 -0
- env/targets/__init__.py +0 -0
- env/targets/accordion_3h.fold +67 -0
- env/targets/accordion_4h.fold +79 -0
- env/targets/diagonal_anti.fold +35 -0
- env/targets/diagonal_main.fold +35 -0
- env/targets/half_horizontal.fold +43 -0
- env/targets/half_vertical.fold +43 -0
- env/targets/thirds_h.fold +55 -0
- env/targets/thirds_v.fold +55 -0
- env/targets/validator.py +119 -0
- env/targets/validator_check.py +19 -0
- env/verifier.py +221 -0
- openenv.yaml +6 -0
- openenv_runtime/__init__.py +11 -0
- openenv_runtime/environment.py +183 -0
- openenv_runtime/models.py +63 -0
- openenv_server/__init__.py +1 -0
- openenv_server/app.py +14 -0
- plans/implementation_plan.md +485 -0
- pyproject.toml +20 -0
- requirements.txt +7 -0
- server.py +172 -0
- sim/__init__.py +0 -0
- sim/animate.py +149 -0
- sim/simulator.py +406 -0
- src/App.css +533 -23
- src/App.js +210 -15
- src/App.test.js +1 -8
- src/components/CreaseCanvas.js +113 -0
- src/components/Fold3DCanvas.js +327 -0
- src/components/InfoBadges.js +72 -0
- src/components/PlayerControls.js +54 -0
- src/components/RewardPanel.js +50 -0
- src/components/StepFeed.js +73 -0
- src/components/TargetSelector.js +38 -0
- src/index.css +29 -8
- src/reportWebVitals.js +1 -13
- tests/__init__.py +0 -0
- tests/test_graph.py +115 -0
- tests/test_openenv_adapter.py +60 -0
- tests/test_paper_state.py +77 -0
|
@@ -28,3 +28,5 @@ __pycache__/
|
|
| 28 |
|
| 29 |
# Reference repos (not pushed to HF)
|
| 30 |
.reference/
|
|
|
|
|
|
|
|
|
| 28 |
|
| 29 |
# Reference repos (not pushed to HF)
|
| 30 |
.reference/
|
| 31 |
+
*.pyc
|
| 32 |
+
__pycache__/
|
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM ghcr.io/meta-pytorch/openenv-base:latest
|
| 2 |
+
|
| 3 |
+
WORKDIR /app
|
| 4 |
+
|
| 5 |
+
COPY . /app
|
| 6 |
+
|
| 7 |
+
RUN pip install --no-cache-dir -r requirements.txt \
|
| 8 |
+
&& pip install --no-cache-dir "openenv-core[core]>=0.2.1"
|
| 9 |
+
|
| 10 |
+
ENV ENABLE_WEB_INTERFACE=false
|
| 11 |
+
|
| 12 |
+
CMD ["uvicorn", "openenv_server.app:app", "--host", "0.0.0.0", "--port", "8000"]
|
|
@@ -0,0 +1,767 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# OrigamiRL — OpenEnv Hackathon Handoff Document
|
| 2 |
+
|
| 3 |
+
## TL;DR
|
| 4 |
+
|
| 5 |
+
Build the **first multi-turn RL environment where an LLM learns to generate origami folding instructions**, verified by a computational origami simulator. Target the OpenEnv Hackathon (March 7-8, 2026, SF — $100K+ in prizes). Use OpenEnv spec + Unsloth GRPO for training. Dense verifiable rewards from origami geometry theorems (Kawasaki, Maekawa). No learned reward model needed.
|
| 6 |
+
|
| 7 |
+
---
|
| 8 |
+
|
| 9 |
+
## Hackathon Context
|
| 10 |
+
|
| 11 |
+
- **Event:** OpenEnv Hackathon SF, hosted by Cerebral Valley + Shack15 + Meta/PyTorch
|
| 12 |
+
- **Date:** March 7-8, 2026 (happening NOW)
|
| 13 |
+
- **Prize:** $100K+ cash
|
| 14 |
+
- **Teams:** Up to 4 people
|
| 15 |
+
- **Format:** Build RL environments, post-train a base model
|
| 16 |
+
|
| 17 |
+
### Judging Criteria
|
| 18 |
+
|
| 19 |
+
| Category | Weight | What Matters |
|
| 20 |
+
|----------|--------|-------------|
|
| 21 |
+
| Environment Innovation | 40% | Novel, creative, challenging. Does it meaningfully test agent behavior? |
|
| 22 |
+
| Storytelling | 30% | Clear problem explanation, engaging demo, easy to follow |
|
| 23 |
+
| Training Script Showing Improvement | 20% | Observable reward curves, before/after behavior |
|
| 24 |
+
| Reward and Training Pipeline Setup | 10% | Coherent reward logic, meaningful improvement in inference |
|
| 25 |
+
|
| 26 |
+
### Key Sponsors to Impress
|
| 27 |
+
|
| 28 |
+
- **Meta/PyTorch** — OpenEnv creators, want environments using their spec
|
| 29 |
+
- **Unsloth AI** — GRPO training infra, ART (Agent Reinforcement Trainer). USE THEIR TOOLS.
|
| 30 |
+
- **OpenPipe** — ART trainer (frontend/backend split for GRPO). Also use.
|
| 31 |
+
- **Patronus AI** — Building "generative simulators" (auto-scaling RL environments). They care about curriculum difficulty scaling and verifiable rewards.
|
| 32 |
+
- **Snorkel AI** — "2026 is the year of environments." They care about data quality and environment diversity.
|
| 33 |
+
- **Hugging Face** — OpenEnv Hub, want environments deployed there
|
| 34 |
+
- **Scale AI / Mercor** — Agent evaluation, structured task environments
|
| 35 |
+
|
| 36 |
+
---
|
| 37 |
+
|
| 38 |
+
## The Pitch (for judges)
|
| 39 |
+
|
| 40 |
+
> "Spatial reasoning is the next frontier for LLM training — NeurIPS 2025 papers like OrigamiSpace showed that even GPT-5 fails at multi-step origami reasoning. But those are benchmarks, not training environments. We built OrigamiRL: the first multi-turn RL environment where an LLM agent learns to fold paper by outputting instructions, receiving geometric feedback, and improving through GRPO. Our reward function is fully verifiable — fold validity is checked against computational origami axioms, not an LLM judge. We built it on OpenEnv + Unsloth with a natural curriculum from single folds to full cranes."
|
| 41 |
+
|
| 42 |
+
---
|
| 43 |
+
|
| 44 |
+
## Prior Work (What Exists, Where the Gaps Are)
|
| 45 |
+
|
| 46 |
+
### 1. OrigamiSpace (NeurIPS 2025 Spotlight)
|
| 47 |
+
|
| 48 |
+
- **Paper:** https://arxiv.org/abs/2511.18450
|
| 49 |
+
- **What it is:** Benchmark with 350 origami data instances (CP diagrams, folding processes, folded shapes). 4 evaluation tasks: Pattern Prediction, Multi-step Spatial Reasoning, Spatial Relationship Prediction, End-to-End CP Code Generation.
|
| 50 |
+
- **Their compiler:** Outputs detailed flattened diagrams with crease locations and stacking relationships, supports interactive simulation with MLLMs, provides comprehensive error feedback. Checks: syntax validity, geometric foldability, no self-intersections, Kawasaki's theorem, Maekawa's theorem.
|
| 51 |
+
- **Their reward metrics for code gen:** Hausdorff distance (shape similarity), dihedral angle distribution, bounding box aspect ratios, constraint satisfaction.
|
| 52 |
+
- **Difficulty levels:** Easy (3-9 steps), Medium (10-19 steps), Hard (20-30 steps)
|
| 53 |
+
- **Gap:** Single-turn only (LLM generates complete CP code in one shot). They mention RL exploration but it's not the focus. No multi-turn sequential folding.
|
| 54 |
+
|
| 55 |
+
### 2. GamiBench (Dec 2025)
|
| 56 |
+
|
| 57 |
+
- **Paper:** https://arxiv.org/abs/2512.22207
|
| 58 |
+
- **What it is:** 186 regular + 186 impossible 2D crease patterns with 3D folded shapes from 6 viewpoints. 3 VQA tasks.
|
| 59 |
+
- **Gap:** Evaluation-only, no training. Tests single-step spatial understanding.
|
| 60 |
+
|
| 61 |
+
### 3. SpatialThinker (NeurIPS 2025)
|
| 62 |
+
|
| 63 |
+
- **Paper:** https://arxiv.org/abs/2511.07403
|
| 64 |
+
- **What it is:** 3D-aware MLLM trained with RL using dense spatial rewards. Constructs scene graphs. Multi-objective reward with lexicographic gating.
|
| 65 |
+
- **Key architecture to steal:** Dense reward design with lexicographic ordering — format → count → accuracy → spatial. Nearly doubled RL training gains vs sparse rewards. Only needed 7K training samples with GRPO.
|
| 66 |
+
- **Gap:** Static scene understanding (objects on a table), not sequential physical transformations.
|
| 67 |
+
|
| 68 |
+
### 4. rigid-origami Gym (IJCAI 2023)
|
| 69 |
+
|
| 70 |
+
- **Repo:** https://github.com/belalugaX/rigid-origami
|
| 71 |
+
- **Paper:** "Automating Rigid Origami Design" (https://arxiv.org/abs/2211.13219)
|
| 72 |
+
- **What it is:** Gym environment where agent constructs crease pattern graphs on a board. Sparse rewards. Foldability validated by triangle intersection tests + kinematic rigidity model. Game terminates on non-foldable states.
|
| 73 |
+
- **Gap:** Classical RL agents (discrete grid actions), NOT LLMs generating text. Rigid-origami tessellations only, not traditional origami. No natural language.
|
| 74 |
+
|
| 75 |
+
### 5. The Unique Gap We Fill
|
| 76 |
+
|
| 77 |
+
Nobody has built a model that reasons about **sequential 2D-to-3D geometric transformations with physical constraints** through **natural language instructions** in a **multi-turn RL training loop**. Origami is uniquely hard because it requires tracking how a flat sheet's topology changes through a sequence of folds — mental rotation, spatial visualization, and perspective-taking all at once.
|
| 78 |
+
|
| 79 |
+
---
|
| 80 |
+
|
| 81 |
+
## Environment Design
|
| 82 |
+
|
| 83 |
+
### Architecture Overview
|
| 84 |
+
|
| 85 |
+
```
|
| 86 |
+
+---------------------------------------------------+
|
| 87 |
+
| OpenEnv Server |
|
| 88 |
+
| +-----------+ +----------+ +--------------+ |
|
| 89 |
+
| | State | | Action | | Reward | |
|
| 90 |
+
| | (FOLD JSON| | (LLM | | (Dense, | |
|
| 91 |
+
| | + target)| | output) | | verifiable) | |
|
| 92 |
+
| +-----------+ +----------+ +--------------+ |
|
| 93 |
+
| | | | |
|
| 94 |
+
| v v v |
|
| 95 |
+
| +-----------------------------------------------+|
|
| 96 |
+
| | Paper Geometry Engine (Python) ||
|
| 97 |
+
| | - Polygon state (Shapely) ||
|
| 98 |
+
| | - Fold operations (reflection across line) ||
|
| 99 |
+
| | - Kawasaki/Maekawa constraint checks ||
|
| 100 |
+
| | - Layer tracking ||
|
| 101 |
+
| | - FOLD format import/export ||
|
| 102 |
+
| +-----------------------------------------------+|
|
| 103 |
+
| | |
|
| 104 |
+
| v |
|
| 105 |
+
| +-----------------------------------------------+|
|
| 106 |
+
| | Three.js Visualizer (Demo only) ||
|
| 107 |
+
| | - 3D fold animation ||
|
| 108 |
+
| | - Strain heatmap ||
|
| 109 |
+
| | - Instruction stream ||
|
| 110 |
+
| +-----------------------------------------------+|
|
| 111 |
+
+---------------------------------------------------+
|
| 112 |
+
| ^
|
| 113 |
+
v |
|
| 114 |
+
+---------------------------------------------------+
|
| 115 |
+
| Unsloth ART / GRPO Trainer |
|
| 116 |
+
| - Qwen2.5-VL-7B or Qwen3-4B base model |
|
| 117 |
+
| - LoRA/QLoRA for efficient training |
|
| 118 |
+
| - Multi-turn rollouts |
|
| 119 |
+
+---------------------------------------------------+
|
| 120 |
+
```
|
| 121 |
+
|
| 122 |
+
### OpenEnv Spec Compliance
|
| 123 |
+
|
| 124 |
+
Must implement these APIs:
|
| 125 |
+
|
| 126 |
+
```python
|
| 127 |
+
class OrigamiEnv:
|
| 128 |
+
async def reset() -> Observation # New episode: flat paper + target
|
| 129 |
+
async def step(action) -> (Observation, reward, done, info)
|
| 130 |
+
async def state() -> State # Current paper geometry
|
| 131 |
+
async def close() # Cleanup
|
| 132 |
+
```
|
| 133 |
+
|
| 134 |
+
OpenEnv repo: https://github.com/meta-pytorch/OpenEnv
|
| 135 |
+
Install: `pip install -e .` then `openenv init origami_env`
|
| 136 |
+
|
| 137 |
+
### State Space
|
| 138 |
+
|
| 139 |
+
```python
|
| 140 |
+
@dataclass
|
| 141 |
+
class OrigamiState:
|
| 142 |
+
# Current paper geometry
|
| 143 |
+
vertices: List[Tuple[float, float]] # 2D vertex positions
|
| 144 |
+
edges: List[Tuple[int, int]] # Edge connectivity
|
| 145 |
+
edges_assignment: List[str] # 'M', 'V', 'B', 'F' (mountain/valley/boundary/flat)
|
| 146 |
+
edges_foldAngle: List[float] # -180 to 180 degrees
|
| 147 |
+
faces: List[List[int]] # Face vertex indices
|
| 148 |
+
layer_order: List[List[int]] # Face stacking order
|
| 149 |
+
|
| 150 |
+
# Episode context
|
| 151 |
+
target_crease_pattern: dict # Target FOLD JSON
|
| 152 |
+
target_shape_image: Optional[np.ndarray] # Target folded shape (for multimodal)
|
| 153 |
+
instruction_history: List[str] # Previous instructions
|
| 154 |
+
step_count: int
|
| 155 |
+
max_steps: int
|
| 156 |
+
```
|
| 157 |
+
|
| 158 |
+
This maps directly to the **FOLD format** (JSON-based, used by all origami software):
|
| 159 |
+
|
| 160 |
+
```json
|
| 161 |
+
{
|
| 162 |
+
"vertices_coords": [[0,0], [1,0], [1,1], [0,1]],
|
| 163 |
+
"edges_vertices": [[0,1], [1,2], [2,3], [3,0]],
|
| 164 |
+
"edges_assignment": ["B", "B", "B", "B"],
|
| 165 |
+
"edges_foldAngle": [0, 0, 0, 0],
|
| 166 |
+
"faces_vertices": [[0, 1, 2, 3]]
|
| 167 |
+
}
|
| 168 |
+
```
|
| 169 |
+
|
| 170 |
+
FOLD spec: https://github.com/edemaine/fold
|
| 171 |
+
FOLD JS library: https://edemaine.github.io/fold/
|
| 172 |
+
|
| 173 |
+
### Action Space
|
| 174 |
+
|
| 175 |
+
The LLM outputs a JSON action:
|
| 176 |
+
|
| 177 |
+
```json
|
| 178 |
+
{
|
| 179 |
+
"instruction": "Fold the top edge down to meet the bottom edge",
|
| 180 |
+
"fold_line": [[0, 0.5], [1, 0.5]],
|
| 181 |
+
"fold_angle": -180,
|
| 182 |
+
"assignment": "V"
|
| 183 |
+
}
|
| 184 |
+
```
|
| 185 |
+
|
| 186 |
+
The `instruction` field is natural language (what we're training the model to produce well). The geometric fields are the verifiable representation. During training, the model outputs both; for the final demo, the NL instruction is the star.
|
| 187 |
+
|
| 188 |
+
Alternative simpler action (for early iterations):
|
| 189 |
+
|
| 190 |
+
```json
|
| 191 |
+
{
|
| 192 |
+
"instruction": "Valley fold along the horizontal center line",
|
| 193 |
+
"fold_type": "valley",
|
| 194 |
+
"fold_axis": "horizontal",
|
| 195 |
+
"fold_position": 0.5
|
| 196 |
+
}
|
| 197 |
+
```
|
| 198 |
+
|
| 199 |
+
### Reward Function — Dense, Multi-Objective, Lexicographically Gated
|
| 200 |
+
|
| 201 |
+
Inspired by SpatialThinker's design. Rewards are computed in order; later rewards only apply if earlier gates pass.
|
| 202 |
+
|
| 203 |
+
```python
|
| 204 |
+
def compute_reward(state, action, new_state, target) -> dict:
|
| 205 |
+
rewards = {}
|
| 206 |
+
|
| 207 |
+
# LEVEL 1: Format (gate for everything else)
|
| 208 |
+
# Does the output parse into a valid fold operation?
|
| 209 |
+
rewards['format'] = 1.0 if parseable(action) else 0.0
|
| 210 |
+
if rewards['format'] == 0:
|
| 211 |
+
return rewards # Stop here
|
| 212 |
+
|
| 213 |
+
# LEVEL 2: Local Geometric Validity
|
| 214 |
+
# Kawasaki's theorem: sector angles at each interior vertex sum to 2pi
|
| 215 |
+
kawasaki_valid = check_kawasaki(new_state)
|
| 216 |
+
# Maekawa's theorem: |M - V| = 2 at each interior vertex
|
| 217 |
+
maekawa_valid = check_maekawa(new_state)
|
| 218 |
+
# No self-intersection
|
| 219 |
+
no_intersection = check_no_self_intersection(new_state)
|
| 220 |
+
rewards['validity'] = (kawasaki_valid + maekawa_valid + no_intersection) / 3.0
|
| 221 |
+
if rewards['validity'] < 0.5:
|
| 222 |
+
return rewards # Stop here
|
| 223 |
+
|
| 224 |
+
# LEVEL 3: Physical Feasibility
|
| 225 |
+
# Can this fold actually be performed given layer stack?
|
| 226 |
+
layer_consistent = check_layer_ordering(new_state)
|
| 227 |
+
fold_achievable = check_fold_angle_feasible(new_state)
|
| 228 |
+
rewards['feasibility'] = (layer_consistent + fold_achievable) / 2.0
|
| 229 |
+
|
| 230 |
+
# LEVEL 4: Progress Toward Target (Dense)
|
| 231 |
+
# Crease pattern graph similarity
|
| 232 |
+
cp_similarity = crease_pattern_similarity(new_state, target)
|
| 233 |
+
# Fold angle distribution match
|
| 234 |
+
angle_similarity = fold_angle_distribution_match(new_state, target)
|
| 235 |
+
# Bounding box aspect ratio match
|
| 236 |
+
bbox_similarity = bounding_box_similarity(new_state, target)
|
| 237 |
+
rewards['progress'] = 0.4 * cp_similarity + 0.4 * angle_similarity + 0.2 * bbox_similarity
|
| 238 |
+
|
| 239 |
+
# LEVEL 5: Completion Bonus
|
| 240 |
+
if shape_matches_target(new_state, target, tolerance=0.05):
|
| 241 |
+
rewards['completion'] = 10.0
|
| 242 |
+
|
| 243 |
+
# LEVEL 6: Efficiency
|
| 244 |
+
rewards['efficiency'] = -0.01 # Small step penalty to encourage fewer folds
|
| 245 |
+
|
| 246 |
+
# Total
|
| 247 |
+
rewards['total'] = (
|
| 248 |
+
0.1 * rewards['format'] +
|
| 249 |
+
0.2 * rewards['validity'] +
|
| 250 |
+
0.1 * rewards['feasibility'] +
|
| 251 |
+
0.5 * rewards['progress'] +
|
| 252 |
+
rewards.get('completion', 0) +
|
| 253 |
+
rewards['efficiency']
|
| 254 |
+
)
|
| 255 |
+
return rewards
|
| 256 |
+
```
|
| 257 |
+
|
| 258 |
+
### Key Origami Theorems for Verification
|
| 259 |
+
|
| 260 |
+
These are the verifiable constraints — the "unit tests" of origami:
|
| 261 |
+
|
| 262 |
+
1. **Kawasaki's Theorem:** At any interior vertex of a flat-foldable crease pattern, the alternating sum of sector angles equals zero (equivalently, they sum to 2pi on each side). NECESSARY condition for flat-foldability.
|
| 263 |
+
|
| 264 |
+
2. **Maekawa's Theorem:** At any interior vertex, the number of mountain folds minus valley folds equals +/-2. |M - V| = 2.
|
| 265 |
+
|
| 266 |
+
3. **No self-intersection:** Faces cannot penetrate each other during folding.
|
| 267 |
+
|
| 268 |
+
4. **Euler's formula for planar graphs:** V - E + F = 2 (sanity check on graph structure).
|
| 269 |
+
|
| 270 |
+
5. **Huzita-Hatori axioms:** The 7 axioms defining all possible single-fold operations (point-to-point, point-to-line, line-to-line, etc.). These define the VALID action space.
|
| 271 |
+
|
| 272 |
+
### Curriculum Design
|
| 273 |
+
|
| 274 |
+
| Level | Folds | Examples | Complexity |
|
| 275 |
+
|-------|-------|----------|-----------|
|
| 276 |
+
| 1 | 1 | Valley fold in half, mountain fold corner | Single fold validity |
|
| 277 |
+
| 2 | 2-3 | Paper airplane nose, triangle fold | Sequential dependency |
|
| 278 |
+
| 3 | 4-6 | Simple boat, fortune teller | Multi-step with symmetry |
|
| 279 |
+
| 4 | 7-12 | Paper airplane (full), jumping frog | Longer horizon planning |
|
| 280 |
+
| 5 | 13-20 | Crane, lily | Complex spatial tracking |
|
| 281 |
+
|
| 282 |
+
For the hackathon, focus on Levels 1-3. Even showing reward improvement on Level 1-2 is a strong result.
|
| 283 |
+
|
| 284 |
+
---
|
| 285 |
+
|
| 286 |
+
## Core Implementation: Python Geometry Engine
|
| 287 |
+
|
| 288 |
+
This is the MOST IMPORTANT piece. Pure Python, no JS dependencies.
|
| 289 |
+
|
| 290 |
+
```python
|
| 291 |
+
import numpy as np
|
| 292 |
+
from shapely.geometry import Polygon, LineString, MultiPolygon
|
| 293 |
+
from shapely.ops import split
|
| 294 |
+
from typing import List, Tuple, Dict
|
| 295 |
+
import json
|
| 296 |
+
|
| 297 |
+
class PaperState:
|
| 298 |
+
"""Represents the current state of the origami paper."""
|
| 299 |
+
|
| 300 |
+
def __init__(self, size: float = 1.0):
|
| 301 |
+
# Start with a unit square
|
| 302 |
+
self.regions = [Polygon([(0,0), (size,0), (size,size), (0,size)])]
|
| 303 |
+
self.fold_history = []
|
| 304 |
+
self.crease_lines = []
|
| 305 |
+
self.crease_assignments = [] # 'M' or 'V'
|
| 306 |
+
self.crease_angles = []
|
| 307 |
+
self.layer_order = [0] # Stack order of regions
|
| 308 |
+
|
| 309 |
+
def apply_fold(self, fold_line: LineString, angle: float, assignment: str) -> dict:
|
| 310 |
+
"""
|
| 311 |
+
Apply a fold operation. Returns dict with validity info.
|
| 312 |
+
fold_line: Shapely LineString defining the fold axis
|
| 313 |
+
angle: fold angle in degrees (-180 to 180)
|
| 314 |
+
assignment: 'M' (mountain) or 'V' (valley)
|
| 315 |
+
"""
|
| 316 |
+
result = {'valid': True, 'errors': []}
|
| 317 |
+
|
| 318 |
+
# 1. Split regions by fold line
|
| 319 |
+
new_regions = []
|
| 320 |
+
for region in self.regions:
|
| 321 |
+
if fold_line.intersects(region):
|
| 322 |
+
parts = split(region, fold_line)
|
| 323 |
+
new_regions.extend(parts.geoms)
|
| 324 |
+
else:
|
| 325 |
+
new_regions.append(region)
|
| 326 |
+
|
| 327 |
+
# 2. Determine which side folds (based on assignment)
|
| 328 |
+
folding_side = []
|
| 329 |
+
staying_side = []
|
| 330 |
+
for region in new_regions:
|
| 331 |
+
centroid = region.centroid
|
| 332 |
+
side = self._point_side(centroid, fold_line)
|
| 333 |
+
if side > 0:
|
| 334 |
+
folding_side.append(region)
|
| 335 |
+
else:
|
| 336 |
+
staying_side.append(region)
|
| 337 |
+
|
| 338 |
+
# 3. Reflect folding regions across fold line
|
| 339 |
+
reflected = [self._reflect_polygon(r, fold_line) for r in folding_side]
|
| 340 |
+
|
| 341 |
+
# 4. Update state
|
| 342 |
+
self.regions = staying_side + reflected
|
| 343 |
+
self.crease_lines.append(fold_line)
|
| 344 |
+
self.crease_assignments.append(assignment)
|
| 345 |
+
self.crease_angles.append(angle)
|
| 346 |
+
self.fold_history.append({
|
| 347 |
+
'line': list(fold_line.coords),
|
| 348 |
+
'angle': angle,
|
| 349 |
+
'assignment': assignment
|
| 350 |
+
})
|
| 351 |
+
|
| 352 |
+
# 5. Update layer order
|
| 353 |
+
self._update_layer_order(staying_side, reflected)
|
| 354 |
+
|
| 355 |
+
return result
|
| 356 |
+
|
| 357 |
+
def _reflect_polygon(self, poly: Polygon, line: LineString) -> Polygon:
|
| 358 |
+
"""Reflect a polygon across a line."""
|
| 359 |
+
coords = list(poly.exterior.coords)
|
| 360 |
+
reflected_coords = [self._reflect_point(p, line) for p in coords]
|
| 361 |
+
return Polygon(reflected_coords)
|
| 362 |
+
|
| 363 |
+
def _reflect_point(self, point: tuple, line: LineString) -> tuple:
|
| 364 |
+
"""Reflect a point across a line."""
|
| 365 |
+
p = np.array(point[:2])
|
| 366 |
+
l1 = np.array(line.coords[0])
|
| 367 |
+
l2 = np.array(line.coords[1])
|
| 368 |
+
d = l2 - l1
|
| 369 |
+
d = d / np.linalg.norm(d)
|
| 370 |
+
# Reflection formula: p' = p - 2(p-l1).n * n where n is normal to line
|
| 371 |
+
n = np.array([-d[1], d[0]])
|
| 372 |
+
v = p - l1
|
| 373 |
+
return tuple(p - 2 * np.dot(v, n) * n)
|
| 374 |
+
|
| 375 |
+
def _point_side(self, point, line: LineString) -> float:
|
| 376 |
+
"""Returns positive if point is on left side of line, negative if right."""
|
| 377 |
+
p = np.array([point.x, point.y])
|
| 378 |
+
l1 = np.array(line.coords[0])
|
| 379 |
+
l2 = np.array(line.coords[1])
|
| 380 |
+
return float(np.cross(l2 - l1, p - l1))
|
| 381 |
+
|
| 382 |
+
def _update_layer_order(self, staying, reflected):
|
| 383 |
+
"""Update the layer stacking order after a fold."""
|
| 384 |
+
self.layer_order = list(range(len(staying))) + \
|
| 385 |
+
list(range(len(staying), len(staying) + len(reflected)))
|
| 386 |
+
|
| 387 |
+
def to_fold_json(self) -> dict:
|
| 388 |
+
"""Export current state as FOLD format JSON."""
|
| 389 |
+
vertices = set()
|
| 390 |
+
for line in self.crease_lines:
|
| 391 |
+
for coord in line.coords:
|
| 392 |
+
vertices.add(tuple(round(c, 10) for c in coord))
|
| 393 |
+
# Add boundary vertices
|
| 394 |
+
for region in self.regions:
|
| 395 |
+
for coord in region.exterior.coords:
|
| 396 |
+
vertices.add(tuple(round(c, 10) for c in coord[:2]))
|
| 397 |
+
|
| 398 |
+
vertices = sorted(list(vertices))
|
| 399 |
+
vertex_map = {v: i for i, v in enumerate(vertices)}
|
| 400 |
+
|
| 401 |
+
edge_set = set()
|
| 402 |
+
edges_list = []
|
| 403 |
+
assignments_list = []
|
| 404 |
+
angles_list = []
|
| 405 |
+
|
| 406 |
+
# Add crease edges
|
| 407 |
+
for i, line in enumerate(self.crease_lines):
|
| 408 |
+
c = [tuple(round(x, 10) for x in coord) for coord in line.coords]
|
| 409 |
+
edge = tuple(sorted([vertex_map[c[0]], vertex_map[c[1]]]))
|
| 410 |
+
if edge not in edge_set:
|
| 411 |
+
edge_set.add(edge)
|
| 412 |
+
edges_list.append(list(edge))
|
| 413 |
+
assignments_list.append(self.crease_assignments[i])
|
| 414 |
+
angles_list.append(self.crease_angles[i])
|
| 415 |
+
|
| 416 |
+
return {
|
| 417 |
+
'vertices_coords': [list(v) for v in vertices],
|
| 418 |
+
'edges_vertices': edges_list,
|
| 419 |
+
'edges_assignment': assignments_list,
|
| 420 |
+
'edges_foldAngle': angles_list,
|
| 421 |
+
}
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
class OrigamiVerifier:
|
| 425 |
+
"""Verifiable reward functions based on origami theorems."""
|
| 426 |
+
|
| 427 |
+
@staticmethod
|
| 428 |
+
def check_kawasaki(state: PaperState) -> bool:
|
| 429 |
+
"""Kawasaki's theorem: alternating sum of angles at each interior vertex = 0."""
|
| 430 |
+
fold_json = state.to_fold_json()
|
| 431 |
+
vertices = fold_json['vertices_coords']
|
| 432 |
+
edges = fold_json['edges_vertices']
|
| 433 |
+
|
| 434 |
+
for v_idx in range(len(vertices)):
|
| 435 |
+
v = vertices[v_idx]
|
| 436 |
+
incident_edges = [e for e in edges if v_idx in e]
|
| 437 |
+
if len(incident_edges) < 4:
|
| 438 |
+
continue # Need degree-4+ for Kawasaki
|
| 439 |
+
|
| 440 |
+
# Calculate sector angles
|
| 441 |
+
angles = []
|
| 442 |
+
for e in incident_edges:
|
| 443 |
+
other = e[1] if e[0] == v_idx else e[0]
|
| 444 |
+
other_v = vertices[other]
|
| 445 |
+
angle = np.arctan2(other_v[1] - v[1], other_v[0] - v[0])
|
| 446 |
+
angles.append(angle)
|
| 447 |
+
|
| 448 |
+
angles.sort()
|
| 449 |
+
sector_angles = []
|
| 450 |
+
for i in range(len(angles) - 1):
|
| 451 |
+
sector_angles.append(angles[i+1] - angles[i])
|
| 452 |
+
sector_angles.append(2*np.pi - (angles[-1] - angles[0]))
|
| 453 |
+
|
| 454 |
+
# Kawasaki: alternating sum should be ~0
|
| 455 |
+
if len(sector_angles) >= 4:
|
| 456 |
+
alt_sum = sum(sector_angles[::2]) - sum(sector_angles[1::2])
|
| 457 |
+
if abs(alt_sum) > 0.01:
|
| 458 |
+
return False
|
| 459 |
+
return True
|
| 460 |
+
|
| 461 |
+
@staticmethod
|
| 462 |
+
def check_maekawa(state: PaperState) -> bool:
|
| 463 |
+
"""Maekawa's theorem: |M - V| = 2 at each interior vertex."""
|
| 464 |
+
fold_json = state.to_fold_json()
|
| 465 |
+
vertices = fold_json['vertices_coords']
|
| 466 |
+
edges = fold_json['edges_vertices']
|
| 467 |
+
assignments = fold_json['edges_assignment']
|
| 468 |
+
|
| 469 |
+
for v_idx in range(len(vertices)):
|
| 470 |
+
incident = [(i, e) for i, e in enumerate(edges) if v_idx in e]
|
| 471 |
+
m_count = sum(1 for i, _ in incident if i < len(assignments) and assignments[i] == 'M')
|
| 472 |
+
v_count = sum(1 for i, _ in incident if i < len(assignments) and assignments[i] == 'V')
|
| 473 |
+
|
| 474 |
+
if m_count + v_count >= 4: # Interior vertex with folds
|
| 475 |
+
if abs(m_count - v_count) != 2:
|
| 476 |
+
return False
|
| 477 |
+
return True
|
| 478 |
+
|
| 479 |
+
@staticmethod
|
| 480 |
+
def crease_pattern_similarity(state: PaperState, target_fold_json: dict) -> float:
|
| 481 |
+
"""Compare current crease pattern to target. Returns 0-1 similarity."""
|
| 482 |
+
current = state.to_fold_json()
|
| 483 |
+
|
| 484 |
+
n_current = len(current.get('edges_vertices', []))
|
| 485 |
+
n_target = len(target_fold_json.get('edges_vertices', []))
|
| 486 |
+
|
| 487 |
+
if n_target == 0:
|
| 488 |
+
return 1.0 if n_current == 0 else 0.0
|
| 489 |
+
|
| 490 |
+
edge_count_sim = 1.0 - abs(n_current - n_target) / max(n_target, 1)
|
| 491 |
+
edge_count_sim = max(0, edge_count_sim)
|
| 492 |
+
|
| 493 |
+
current_assignments = current.get('edges_assignment', [])
|
| 494 |
+
target_assignments = target_fold_json.get('edges_assignment', [])
|
| 495 |
+
|
| 496 |
+
c_m = current_assignments.count('M')
|
| 497 |
+
c_v = current_assignments.count('V')
|
| 498 |
+
t_m = target_assignments.count('M')
|
| 499 |
+
t_v = target_assignments.count('V')
|
| 500 |
+
|
| 501 |
+
total = max(t_m + t_v, 1)
|
| 502 |
+
assign_sim = 1.0 - (abs(c_m - t_m) + abs(c_v - t_v)) / (2 * total)
|
| 503 |
+
assign_sim = max(0, assign_sim)
|
| 504 |
+
|
| 505 |
+
return 0.5 * edge_count_sim + 0.5 * assign_sim
|
| 506 |
+
```
|
| 507 |
+
|
| 508 |
+
---
|
| 509 |
+
|
| 510 |
+
## OpenEnv Environment Wrapper
|
| 511 |
+
|
| 512 |
+
```python
|
| 513 |
+
# origami_env/server.py
|
| 514 |
+
from openenv.core import Environment
|
| 515 |
+
from paper_engine import PaperState, OrigamiVerifier
|
| 516 |
+
from shapely.geometry import LineString
|
| 517 |
+
import json
|
| 518 |
+
|
| 519 |
+
class OrigamiEnvironment(Environment):
    # Doc-example OpenEnv wrapper: episodic fold-by-fold interaction against
    # a target FOLD pattern, rewarded by the OrigamiVerifier theorems.

    def __init__(self, targets_dir="targets/", max_steps=20):
        self.targets_dir = targets_dir
        self.max_steps = max_steps
        self.paper = None      # PaperState, created on reset()
        self.target = None     # target FOLD dict, loaded on reset()
        self.step_count = 0

    async def reset(self, target_id=None):
        # Fresh unit paper + target pattern; returns the initial observation.
        self.paper = PaperState(size=1.0)
        self.target = self._load_target(target_id)
        self.step_count = 0
        return self._get_observation()

    async def step(self, action):
        # Apply one fold action; returns (observation, reward, done, info).
        self.step_count += 1

        # Parse action
        try:
            fold_line = LineString(action['fold_line'])
            angle = action['fold_angle']
            assignment = action['assignment']
        # NOTE(review): (KeyError, Exception) is equivalent to catching bare
        # Exception — the KeyError entry is redundant, and this swallows
        # unrelated errors; consider narrowing to (KeyError, TypeError, ValueError).
        except (KeyError, Exception):
            reward = {'format': 0, 'total': -0.1}
            return self._get_observation(), reward, False, {'error': 'parse_failed'}

        # Apply fold
        result = self.paper.apply_fold(fold_line, angle, assignment)

        # Compute rewards
        reward = self._compute_reward(result)

        # Check termination
        done = (
            self.step_count >= self.max_steps or
            reward.get('completion', 0) > 0
        )

        return self._get_observation(), reward, done, {}

    async def state(self):
        # Full state snapshot for logging; assumes reset() was called first.
        return {
            'paper': self.paper.to_fold_json(),
            'target': self.target,
            'step': self.step_count,
            'fold_history': self.paper.fold_history
        }

    def _compute_reward(self, fold_result):
        # Reward components: format (parse succeeded), validity (Kawasaki +
        # Maekawa), progress (similarity to target), completion bonus, and a
        # small per-step efficiency penalty.
        rewards = {}
        rewards['format'] = 1.0

        kawasaki = OrigamiVerifier.check_kawasaki(self.paper)
        maekawa = OrigamiVerifier.check_maekawa(self.paper)
        rewards['validity'] = (float(kawasaki) + float(maekawa)) / 2.0

        rewards['progress'] = OrigamiVerifier.crease_pattern_similarity(
            self.paper, self.target
        )

        # Near-perfect match triggers the terminal completion bonus.
        if rewards['progress'] > 0.95:
            rewards['completion'] = 10.0

        rewards['efficiency'] = -0.01

        rewards['total'] = (
            0.1 * rewards['format'] +
            0.2 * rewards['validity'] +
            0.6 * rewards['progress'] +
            rewards.get('completion', 0) +
            rewards['efficiency']
        )
        return rewards

    def _get_observation(self):
        # Observation exposed to the policy: FOLD state, target, step index,
        # and a textual trace of prior fold lines.
        return {
            'paper_state': self.paper.to_fold_json(),
            'target': self.target,
            'step': self.step_count,
            'instruction_history': [str(f['line']) for f in self.paper.fold_history]
        }

    def _load_target(self, target_id):
        # Load a named .fold target from disk, or fall back to a built-in
        # single valley fold across the middle of the square.
        if target_id:
            with open(f"{self.targets_dir}/{target_id}.fold") as f:
                return json.load(f)
        # Default: simple valley fold in half
        return {
            'vertices_coords': [[0,0], [1,0], [1,1], [0,1], [0,0.5], [1,0.5]],
            'edges_vertices': [[0,1], [1,2], [2,3], [3,0], [4,5]],
            'edges_assignment': ['B', 'B', 'B', 'B', 'V'],
            'edges_foldAngle': [0, 0, 0, 0, -180],
        }
|
| 613 |
+
```
|
| 614 |
+
|
| 615 |
+
---
|
| 616 |
+
|
| 617 |
+
## Training Script (Unsloth GRPO)
|
| 618 |
+
|
| 619 |
+
```python
|
| 620 |
+
# train.py
|
| 621 |
+
from unsloth import FastLanguageModel
|
| 622 |
+
from trl import GRPOConfig, GRPOTrainer
|
| 623 |
+
import torch
|
| 624 |
+
|
| 625 |
+
# Load model
|
| 626 |
+
model, tokenizer = FastLanguageModel.from_pretrained(
|
| 627 |
+
model_name="unsloth/Qwen2.5-7B-Instruct",
|
| 628 |
+
max_seq_length=4096,
|
| 629 |
+
load_in_4bit=True,
|
| 630 |
+
)
|
| 631 |
+
|
| 632 |
+
# Add LoRA
|
| 633 |
+
model = FastLanguageModel.get_peft_model(
|
| 634 |
+
model,
|
| 635 |
+
r=32,
|
| 636 |
+
target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
|
| 637 |
+
"gate_proj", "up_proj", "down_proj"],
|
| 638 |
+
lora_alpha=32,
|
| 639 |
+
lora_dropout=0,
|
| 640 |
+
use_gradient_checkpointing="unsloth",
|
| 641 |
+
)
|
| 642 |
+
|
| 643 |
+
# Reward function
|
| 644 |
+
def origami_reward(completions, prompts):
    """Compute rewards for a batch of completions."""
    # NOTE(review): `target` below is a free variable — it must exist at
    # module scope before training starts or every completion scores -0.1.
    # `prompts` is accepted for the TRL reward-function signature but is
    # unused here.
    rewards = []
    for completion in completions:
        try:
            action = parse_fold_action(completion)
            paper = PaperState()
            result = paper.apply_fold(action['fold_line'], action['angle'], action['assignment'])
            r = compute_reward(paper, target)
            rewards.append(r['total'])
        except Exception:
            # Unparseable or invalid completion: flat negative reward.
            rewards.append(-0.1)
    return rewards
|
| 657 |
+
|
| 658 |
+
# GRPO Config
|
| 659 |
+
config = GRPOConfig(
|
| 660 |
+
output_dir="origami-grpo",
|
| 661 |
+
num_train_epochs=3,
|
| 662 |
+
per_device_train_batch_size=4,
|
| 663 |
+
gradient_accumulation_steps=4,
|
| 664 |
+
learning_rate=5e-6,
|
| 665 |
+
max_completion_length=512,
|
| 666 |
+
num_generations=8,
|
| 667 |
+
temperature=1.0,
|
| 668 |
+
logging_steps=1,
|
| 669 |
+
)
|
| 670 |
+
|
| 671 |
+
dataset = load_origami_prompts()
|
| 672 |
+
|
| 673 |
+
trainer = GRPOTrainer(
|
| 674 |
+
model=model,
|
| 675 |
+
config=config,
|
| 676 |
+
train_dataset=dataset,
|
| 677 |
+
reward_funcs=[origami_reward],
|
| 678 |
+
tokenizer=tokenizer,
|
| 679 |
+
)
|
| 680 |
+
|
| 681 |
+
trainer.train()
|
| 682 |
+
```
|
| 683 |
+
|
| 684 |
+
---
|
| 685 |
+
|
| 686 |
+
## Visualization (Demo Only — Not in Training Loop)
|
| 687 |
+
|
| 688 |
+
### Options
|
| 689 |
+
|
| 690 |
+
1. **Origami Simulator** — https://github.com/amandaghassaei/OrigamiSimulator — Three.js, accepts FOLD files, shows folding animation with strain visualization
|
| 691 |
+
2. **PackCAD** — https://packcad.com/ — Web-based, SVG crease patterns, rigid folding simulation
|
| 692 |
+
3. **Custom Three.js** — Simpler but more control
|
| 693 |
+
|
| 694 |
+
### Demo UI Layout
|
| 695 |
+
|
| 696 |
+
```
|
| 697 |
+
+----------------------+----------------------+
|
| 698 |
+
| Instruction Stream | 3D Fold Viewer |
|
| 699 |
+
| | |
|
| 700 |
+
| Step 1: Valley fold | [Three.js canvas] |
|
| 701 |
+
| along center [OK] | |
|
| 702 |
+
| | Paper animating |
|
| 703 |
+
| Step 2: Fold top | fold by fold |
|
| 704 |
+
| corners to center | |
|
| 705 |
+
| | |
|
| 706 |
+
+----------------------+----------------------+
|
| 707 |
+
| Reward Dashboard |
|
| 708 |
+
| Format: ========== 1.0 |
|
| 709 |
+
| Validity: ========.. 0.8 |
|
| 710 |
+
| Progress: ======.... 0.6 |
|
| 711 |
+
| Total: =======... 0.72 |
|
| 712 |
+
| |
|
| 713 |
+
| [Reward curve over training steps] |
|
| 714 |
+
+----------------------------------------------+
|
| 715 |
+
```
|
| 716 |
+
|
| 717 |
+
---
|
| 718 |
+
|
| 719 |
+
## Key Libraries and Resources
|
| 720 |
+
|
| 721 |
+
| Tool | Purpose | Link |
|
| 722 |
+
|------|---------|------|
|
| 723 |
+
| OpenEnv | Environment framework | https://github.com/meta-pytorch/OpenEnv |
|
| 724 |
+
| Unsloth | GRPO training | https://github.com/unslothai/unsloth |
|
| 725 |
+
| OpenPipe ART | Multi-turn RL trainer | https://github.com/OpenPipe/ART |
|
| 726 |
+
| FOLD format | Origami data structure | https://github.com/edemaine/fold |
|
| 727 |
+
| Rabbit Ear | JS origami library | https://github.com/rabbit-ear/rabbit-ear |
|
| 728 |
+
| Origami Simulator | 3D visualization | https://github.com/amandaghassaei/OrigamiSimulator |
|
| 729 |
+
| PackCAD | Folding simulation | https://packcad.com/ |
|
| 730 |
+
| Shapely | Python geometry | pip install shapely |
|
| 731 |
+
| rigid-origami gym | Reference gym env | https://github.com/belalugaX/rigid-origami |
|
| 732 |
+
|
| 733 |
+
### Papers to Cite
|
| 734 |
+
|
| 735 |
+
- OrigamiSpace: https://arxiv.org/abs/2511.18450
|
| 736 |
+
- GamiBench: https://arxiv.org/abs/2512.22207
|
| 737 |
+
- SpatialThinker: https://arxiv.org/abs/2511.07403
|
| 738 |
+
- Automating Rigid Origami Design: https://arxiv.org/abs/2211.13219
|
| 739 |
+
- FOLD format spec: https://github.com/edemaine/fold/blob/main/doc/spec.md
|
| 740 |
+
|
| 741 |
+
---
|
| 742 |
+
|
| 743 |
+
## Priority Build Order
|
| 744 |
+
|
| 745 |
+
1. **Python geometry engine** — PaperState class with fold operations and FOLD export
|
| 746 |
+
2. **Verifier functions** — Kawasaki, Maekawa, similarity metrics
|
| 747 |
+
3. **OpenEnv wrapper** — step/reset/state API
|
| 748 |
+
4. **Simple targets** — Hand-create 5-10 Level 1-2 targets as .fold files
|
| 749 |
+
5. **Training script** — Wire up Unsloth GRPO with reward function
|
| 750 |
+
6. **Run training** — Even on small model, get reward curves
|
| 751 |
+
7. **Three.js visualizer** — For demo only, not in training loop
|
| 752 |
+
8. **Before/after demo** — Show base model vs trained model outputs
|
| 753 |
+
9. **Polish presentation narrative**
|
| 754 |
+
|
| 755 |
+
---
|
| 756 |
+
|
| 757 |
+
## Narrative for Judges
|
| 758 |
+
|
| 759 |
+
**The story arc:**
|
| 760 |
+
|
| 761 |
+
1. "LLMs are great at text but terrible at spatial reasoning"
|
| 762 |
+
2. "Origami is the perfect testbed — it's sequential, physical, and verifiable"
|
| 763 |
+
3. "NeurIPS 2025 showed even GPT-5 fails at origami benchmarks, but nobody built a TRAINING environment"
|
| 764 |
+
4. "We built OrigamiRL — the first multi-turn RL environment for origami instruction generation"
|
| 765 |
+
5. "Our rewards come from math theorems, not vibes — Kawasaki's theorem is our unit test"
|
| 766 |
+
6. "Watch the model go from generating paper-tearing nonsense to valid fold sequences"
|
| 767 |
+
7. "This generalizes to any domain where LLMs need to output structured physical instructions"
|
|
File without changes
|
|
@@ -0,0 +1,243 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import copy
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
from .paper_state import PaperState
|
| 8 |
+
from .rewards import compute_reward, compute_terminal_reward, load_target, target_crease_edges
|
| 9 |
+
from .prompts import (
|
| 10 |
+
code_as_policy_prompt,
|
| 11 |
+
step_level_prompt,
|
| 12 |
+
parse_fold_list,
|
| 13 |
+
parse_single_fold,
|
| 14 |
+
)
|
| 15 |
+
from .verifier import check_all_vertices
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
TARGETS_DIR = Path(__file__).parent / 'targets'
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class OrigamiEnvironment:
    """
    OpenEnv-compatible origami crease pattern environment.

    Supports two modes:
    - code_as_policy: model outputs complete fold sequence, gets terminal reward
    - step: model outputs one fold at a time, gets per-step reward
    """

    def __init__(
        self,
        mode: str = 'code_as_policy',  # 'code_as_policy' or 'step'
        max_steps: int = 8,
        targets_dir: Optional[str] = None,
    ):
        assert mode in ('code_as_policy', 'step'), f"Unknown mode: {mode}"
        self.mode = mode
        self.max_steps = max_steps
        self.targets_dir = Path(targets_dir) if targets_dir else TARGETS_DIR

        self.paper: Optional[PaperState] = None
        self.target: Optional[dict] = None
        self.target_name: Optional[str] = None
        self.step_count: int = 0
        self.last_reward: Optional[dict] = None

        # Cache all available targets
        self._targets = self._load_all_targets()

    def _load_all_targets(self) -> dict[str, dict]:
        """Load every .fold file in targets_dir, keyed by file stem."""
        targets = {}
        for fold_file in self.targets_dir.glob('*.fold'):
            with open(fold_file) as f:
                targets[fold_file.stem] = json.load(f)
        return targets

    def available_targets(self) -> list[str]:
        """Sorted names of all loaded targets."""
        return sorted(self._targets.keys())

    def reset(self, target_name: Optional[str] = None) -> dict:
        """
        Reset environment to start of a new episode.

        Args:
            target_name: name of target (stem of .fold file). If None, picks level-1 randomly.

        Returns:
            observation dict with 'prompt' key containing the LLM prompt string.
        """
        import random

        if target_name:
            assert target_name in self._targets, f"Unknown target: {target_name}"
            self.target_name = target_name
        else:
            # Default to level-1 targets
            level1 = [k for k, v in self._targets.items() if v.get('level', 1) == 1]
            self.target_name = random.choice(level1 if level1 else list(self._targets.keys()))

        self.target = self._targets[self.target_name]
        self.paper = PaperState()
        self.step_count = 0
        self.last_reward = None

        return self._get_observation()

    def step(self, action) -> tuple[dict, dict, bool, dict]:
        """
        Execute an action.

        In code_as_policy mode: action is a string (model completion with <folds> tags)
        OR a list of fold dicts already parsed.
        In step mode: action is a string (single fold JSON) or dict.

        Returns:
            (observation, reward, done, info)
        """
        if self.mode == 'code_as_policy':
            return self._step_sequence(action)
        else:
            return self._step_single(action)

    def _step_sequence(self, action) -> tuple[dict, dict, bool, dict]:
        """Execute a complete fold sequence (code-as-policy mode)."""
        # Parse action if it's a string
        if isinstance(action, str):
            try:
                folds = parse_fold_list(action)
            except ValueError as e:
                bad_reward = {'format': 0.0, 'total': -0.1, 'error': str(e)}
                return self._get_observation(), bad_reward, True, self._info()
        else:
            folds = action  # already a list of dicts

        # Execute each fold sequentially
        last_result = {'valid': True, 'anchored': True, 'new_vertices': [], 'errors': []}
        for fold in folds:
            try:
                p1 = fold['from']
                p2 = fold['to']
                assignment = fold['assignment']
            except (KeyError, TypeError) as e:
                last_result = {'valid': False, 'anchored': False, 'new_vertices': [], 'errors': [str(e)]}
                break

            last_result = self.paper.add_crease(p1, p2, assignment)
            self.step_count += 1
            if not last_result['valid']:
                break  # stop at first invalid fold, partial credit

        reward = compute_terminal_reward(self.paper, self.target)
        self.last_reward = reward
        # Episodes in this mode are single-shot: always done after one action.
        return self._get_observation(), reward, True, self._info()

    def _step_single(self, action) -> tuple[dict, dict, bool, dict]:
        """Execute a single fold (step mode)."""
        if isinstance(action, str):
            try:
                fold = parse_single_fold(action)
            except ValueError as e:
                bad_reward = {'format': 0.0, 'total': -0.1, 'error': str(e)}
                self.last_reward = bad_reward
                done = self.step_count >= self.max_steps
                return self._get_observation(), bad_reward, done, self._info()
        else:
            fold = action

        try:
            p1 = fold['from']
            p2 = fold['to']
            assignment = fold['assignment']
        except (KeyError, TypeError) as e:
            bad_reward = {'format': 0.0, 'total': -0.1, 'error': str(e)}
            self.last_reward = bad_reward
            done = self.step_count >= self.max_steps
            return self._get_observation(), bad_reward, done, self._info()

        result = self.paper.add_crease(p1, p2, assignment)
        self.step_count += 1

        reward = compute_reward(self.paper, result, self.target)
        self.last_reward = reward

        # Terminate on step budget or on the completion bonus firing.
        done = (
            self.step_count >= self.max_steps or
            reward.get('completion', 0) > 0
        )
        return self._get_observation(), reward, done, self._info()

    def _get_observation(self) -> dict:
        """Returns observation dict with the LLM prompt and raw state."""
        if self.mode == 'code_as_policy':
            prompt = code_as_policy_prompt(self.target, max_folds=self.max_steps)
        else:
            prompt = step_level_prompt(
                target=self.target,
                paper_state=self.paper,
                step=self.step_count,
                max_steps=self.max_steps,
                last_reward=self.last_reward,
            )

        return {
            'prompt': prompt,
            'target_name': self.target_name,
            'step': self.step_count,
            'paper_fold_json': self.paper.graph.edges if self.paper else {},
        }

    def _info(self) -> dict:
        """Returns diagnostic info dict for logging."""
        if self.paper is None:
            return {}

        interior = self.paper.graph.interior_vertices()
        vertex_scores = check_all_vertices(self.paper.graph)

        return {
            'local_foldability': (
                vertex_scores['kawasaki'] == 1.0 and
                vertex_scores['maekawa'] == 1.0
            ),
            'blb_satisfied': vertex_scores['blb'] == 1.0,
            'global_foldability': 'not_checked',  # NP-complete (Bern-Hayes 1996)
            'n_interior_vertices': len(interior),
            'n_creases': len(self.paper.graph.crease_edges()),
            'target_name': self.target_name,
        }

    def state(self) -> dict:
        """Returns current environment state for logging/inspection."""
        # FIX: previously raised AttributeError when called before reset()
        # (self.paper is None). Mirror _info()'s guard and return an empty
        # snapshot instead.
        if self.paper is None:
            return {
                'paper': None,
                'target': self.target_name,
                'step': self.step_count,
                'mode': self.mode,
            }
        return {
            'paper': {
                'vertices': dict(self.paper.graph.vertices),
                'edges': {
                    k: v for k, v in self.paper.graph.edges.items()
                    if v[2] in ('M', 'V')
                },
                'fold_history': self.paper.fold_history,
            },
            'target': self.target_name,
            'step': self.step_count,
            'mode': self.mode,
        }

    def close(self):
        """Cleanup."""
        pass

    def clone(self) -> 'OrigamiEnvironment':
        """Return a deep copy for parallel evaluation (used in GRPO)."""
        new_env = OrigamiEnvironment(
            mode=self.mode,
            max_steps=self.max_steps,
            targets_dir=str(self.targets_dir),
        )
        if self.paper is not None:
            new_env.paper = copy.deepcopy(self.paper)
        new_env.target = self.target
        new_env.target_name = self.target_name
        new_env.step_count = self.step_count
        new_env.last_reward = self.last_reward
        return new_env
|
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from typing import Optional
|
| 3 |
+
|
| 4 |
+
BOUNDARY_TOL = 1e-9
|
| 5 |
+
VERTEX_TOL = 1e-9
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class CreaseGraph:
    """
    Planar graph representing an origami crease pattern on a unit square.

    Vertices: points in [0,1]x[0,1], deduplicated by proximity.
    Edges: segments between vertices, labeled M (mountain), V (valley), or B (boundary).
    """

    def __init__(self):
        self.vertices: dict[int, tuple[float, float]] = {}
        self.edges: dict[int, tuple[int, int, str]] = {}
        self.vertex_edges: dict[int, list[int]] = {}
        self._next_vertex_id: int = 0
        self._next_edge_id: int = 0

        # Seed the graph with the four unit-square corners...
        for corner in ((0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)):
            cid = self._next_vertex_id
            self._next_vertex_id += 1
            self.vertices[cid] = corner
            self.vertex_edges[cid] = []

        # ...and the boundary edges connecting them in order.
        for a, b in ((0, 1), (1, 2), (2, 3), (3, 0)):
            eid = self._next_edge_id
            self._next_edge_id += 1
            self.edges[eid] = (a, b, 'B')
            self.vertex_edges[a].append(eid)
            self.vertex_edges[b].append(eid)

    def add_vertex(self, x: float, y: float) -> int:
        """Return the id of the vertex at (x, y), creating it if no existing
        vertex lies within VERTEX_TOL on both axes."""
        for existing_id, (ex, ey) in self.vertices.items():
            if abs(ex - x) < VERTEX_TOL and abs(ey - y) < VERTEX_TOL:
                return existing_id
        new_id = self._next_vertex_id
        self._next_vertex_id += 1
        self.vertices[new_id] = (float(x), float(y))
        self.vertex_edges[new_id] = []
        return new_id

    def add_edge(self, v1_id: int, v2_id: int, assignment: str) -> int:
        """Return the id of the edge between the two vertices, creating it
        with the given assignment if absent (existing assignment wins)."""
        wanted = frozenset((v1_id, v2_id))
        for existing_eid, (a, b, _) in self.edges.items():
            if frozenset((a, b)) == wanted:
                return existing_eid
        new_eid = self._next_edge_id
        self._next_edge_id += 1
        self.edges[new_eid] = (v1_id, v2_id, assignment)
        self.vertex_edges[v1_id].append(new_eid)
        self.vertex_edges[v2_id].append(new_eid)
        return new_eid

    def get_cyclic_edges(self, vertex_id: int) -> list[int]:
        """Edge ids incident to the vertex, sorted counterclockwise by the
        angle toward each opposite endpoint."""
        cx, cy = self.vertices[vertex_id]

        def heading(eid: int) -> float:
            a, b, _ = self.edges[eid]
            ox, oy = self.vertices[b if a == vertex_id else a]
            return float(np.arctan2(oy - cy, ox - cx))

        return sorted(self.vertex_edges[vertex_id], key=heading)

    def interior_vertices(self) -> list[int]:
        """Ids of vertices strictly inside the unit square (beyond
        BOUNDARY_TOL of every side)."""
        lo = BOUNDARY_TOL
        hi = 1.0 - BOUNDARY_TOL
        return [
            vid
            for vid, (x, y) in self.vertices.items()
            if lo < x < hi and lo < y < hi
        ]

    def split_edge(self, edge_id: int, new_vertex_id: int) -> tuple[int, int]:
        """Replace edge_id with two edges meeting at new_vertex_id, both
        inheriting the original assignment. Returns the new edge ids."""
        va, vb, label = self.edges.pop(edge_id)
        for endpoint in (va, vb):
            if edge_id in self.vertex_edges[endpoint]:
                self.vertex_edges[endpoint].remove(edge_id)

        created = []
        for start, end in ((va, new_vertex_id), (new_vertex_id, vb)):
            eid = self._next_edge_id
            self._next_edge_id += 1
            self.edges[eid] = (start, end, label)
            self.vertex_edges[start].append(eid)
            self.vertex_edges[end].append(eid)
            created.append(eid)

        return (created[0], created[1])

    def crease_edges(self) -> list[int]:
        """Ids of mountain/valley edges (boundary excluded)."""
        return [eid for eid, (_, _, label) in self.edges.items() if label in ('M', 'V')]

    def boundary_midpoints(self) -> list[tuple[float, float]]:
        """Midpoints of every boundary ('B') edge."""
        result = []
        for _, (a, b, label) in self.edges.items():
            if label != 'B':
                continue
            ax, ay = self.vertices[a]
            bx, by = self.vertices[b]
            result.append(((ax + bx) / 2.0, (ay + by) / 2.0))
        return result
|
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from shapely.geometry import LineString, Point, Polygon
|
| 3 |
+
from shapely.ops import unary_union
|
| 4 |
+
from typing import Optional
|
| 5 |
+
from .graph import CreaseGraph, VERTEX_TOL
|
| 6 |
+
|
| 7 |
+
UNIT_SQUARE_CORNERS = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
|
| 8 |
+
|
| 9 |
+
_UNIT_SQUARE = Polygon(UNIT_SQUARE_CORNERS)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class PaperState:
    """
    Represents the evolving crease pattern on a unit square [0,1]x[0,1].
    Uses CreaseGraph for the underlying data structure.
    """

    def __init__(self):
        # Planar graph of vertices and edges (creases plus boundary).
        self.graph = CreaseGraph()
        # Chronological record of every successfully applied fold.
        self.fold_history: list[dict] = []

    def anchor_points(self) -> list[tuple[float, float]]:
        """Return all valid fold endpoints: the four square corners plus
        every graph vertex, deduplicated while preserving insertion order
        (dict keys act as an ordered set)."""
        points: dict[tuple[float, float], None] = {}
        for corner in UNIT_SQUARE_CORNERS:
            points[corner] = None
        for vid, (x, y) in self.graph.vertices.items():
            points[(float(x), float(y))] = None
        return list(points.keys())

    def _is_anchor(self, pt: tuple[float, float]) -> bool:
        """True when pt coincides with some anchor point within VERTEX_TOL
        on both axes (per-axis tolerance, not Euclidean distance)."""
        px, py = pt
        for ax, ay in self.anchor_points():
            if abs(ax - px) < VERTEX_TOL and abs(ay - py) < VERTEX_TOL:
                return True
        return False

    def add_crease(self, p1: list, p2: list, assignment: str) -> dict:
        """
        Add a straight crease from p1 to p2 with the given assignment.

        Args:
            p1, p2: [x, y] endpoints (coerced to float tuples).
            assignment: 'M' (mountain) or 'V' (valley).

        Returns:
            {'valid': bool, 'anchored': bool,
             'new_vertices': list of (x, y) created at crossings,
             'errors': list of error codes}.

        Side effects: on success, mutates self.graph (new vertices, split
        edges, new crease edges) and appends a record to fold_history.
        """
        errors: list[str] = []

        # Reject anything that is not a mountain/valley assignment outright.
        if assignment not in ('M', 'V'):
            return {
                'valid': False,
                'anchored': False,
                'new_vertices': [],
                'errors': ['invalid_assignment'],
            }

        p1 = (float(p1[0]), float(p1[1]))
        p2 = (float(p2[0]), float(p2[1]))

        # Anchoring is advisory (affects reward), not a validity gate.
        anchored = self._is_anchor(p1) and self._is_anchor(p2)

        # Degenerate segment: endpoints closer than the vertex tolerance.
        seg_len = np.hypot(p2[0] - p1[0], p2[1] - p1[1])
        if seg_len < VERTEX_TOL:
            errors.append('zero_length')
            return {'valid': False, 'anchored': anchored, 'new_vertices': [], 'errors': errors}

        new_line = LineString([p1, p2])

        # Bounds check: a crease fully outside the paper is rejected; one that
        # merely touches/overlaps the boundary is allowed through.
        if not _UNIT_SQUARE.contains(new_line) and not _UNIT_SQUARE.boundary.contains(new_line):
            clipped = new_line.intersection(_UNIT_SQUARE)
            if clipped.is_empty:
                errors.append('outside_bounds')
                return {'valid': False, 'anchored': anchored, 'new_vertices': [], 'errors': errors}

        # Collect proper (interior) crossings with existing edges; crossings
        # at an existing edge's endpoints are ignored since a vertex already
        # exists there.
        intersection_points: list[tuple[float, float]] = []

        for eid, (ev1, ev2, _) in list(self.graph.edges.items()):
            ex1, ey1 = self.graph.vertices[ev1]
            ex2, ey2 = self.graph.vertices[ev2]
            existing_line = LineString([(ex1, ey1), (ex2, ey2)])
            inter = new_line.intersection(existing_line)

            if inter.is_empty:
                continue

            if inter.geom_type == 'Point':
                ix, iy = inter.x, inter.y
                ep1 = (ex1, ey1)
                ep2 = (ex2, ey2)
                if (
                    abs(ix - ep1[0]) < VERTEX_TOL and abs(iy - ep1[1]) < VERTEX_TOL
                    or abs(ix - ep2[0]) < VERTEX_TOL and abs(iy - ep2[1]) < VERTEX_TOL
                ):
                    continue
                intersection_points.append((ix, iy))
            # MultiPoint or LineString intersections (collinear) are skipped

        # For each crossing: register a vertex, then split every edge that
        # passes through it (the split loop must run per crossing — ix/iy/vid
        # are only defined inside this loop).
        new_vertex_coords: list[tuple[float, float]] = []
        for ix, iy in intersection_points:
            before = set(self.graph.vertices.keys())
            vid = self.graph.add_vertex(ix, iy)
            if vid not in before:
                new_vertex_coords.append((ix, iy))

            # Re-check membership each iteration: split_edge mutates the
            # edge table while we iterate a snapshot of its keys.
            for eid in list(self.graph.edges.keys()):
                if eid not in self.graph.edges:
                    continue
                ev1, ev2, _ = self.graph.edges[eid]
                ex1, ey1 = self.graph.vertices[ev1]
                ex2, ey2 = self.graph.vertices[ev2]
                seg = LineString([(ex1, ey1), (ex2, ey2)])
                pt = Point(ix, iy)
                if seg.distance(pt) < VERTEX_TOL:
                    if ev1 != vid and ev2 != vid:
                        self.graph.split_edge(eid, vid)

        # Ensure endpoint vertices exist (ids unused here; add_vertex dedups,
        # and the waypoint pass below looks them up again).
        v1_id = self.graph.add_vertex(p1[0], p1[1])
        v2_id = self.graph.add_vertex(p2[0], p2[1])

        # Order the crossings by distance from p1 so the crease is laid down
        # as consecutive sub-segments p1 -> x1 -> ... -> p2.
        waypoints = [p1] + sorted(
            intersection_points,
            key=lambda pt: np.hypot(pt[0] - p1[0], pt[1] - p1[1]),
        ) + [p2]

        waypoint_ids = []
        for wp in waypoints:
            wid = self.graph.add_vertex(wp[0], wp[1])
            waypoint_ids.append(wid)

        # Connect consecutive waypoints; skip zero-length sub-segments where
        # two waypoints snapped to the same vertex id.
        for i in range(len(waypoint_ids) - 1):
            wa = waypoint_ids[i]
            wb = waypoint_ids[i + 1]
            if wa != wb:
                self.graph.add_edge(wa, wb, assignment)

        record = {
            'p1': p1,
            'p2': p2,
            'assignment': assignment,
            'anchored': anchored,
            'new_vertices': new_vertex_coords,
        }
        self.fold_history.append(record)

        return {
            'valid': True,
            'anchored': anchored,
            'new_vertices': new_vertex_coords,
            'errors': errors,
        }

    def crease_edges(self) -> list[dict]:
        """Return every crease (M/V) edge as
        {'v1': (x, y), 'v2': (x, y), 'assignment': 'M'|'V'}."""
        result = []
        for eid in self.graph.crease_edges():
            v1, v2, assignment = self.graph.edges[eid]
            x1, y1 = self.graph.vertices[v1]
            x2, y2 = self.graph.vertices[v2]
            result.append({'v1': (x1, y1), 'v2': (x2, y2), 'assignment': assignment})
        return result
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import re
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
# Canonical unit-square landmarks used to classify anchor points.
# Lookups are done on coordinates rounded to 4 decimals (see _is_corner).
_CORNERS = {(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)}
_BOUNDARY_X = {0.0, 1.0}
_BOUNDARY_Y = {0.0, 1.0}
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def _is_corner(x: float, y: float) -> bool:
    """True when (x, y), rounded to 4 decimals, is a unit-square corner."""
    rounded = (round(x, 4), round(y, 4))
    return rounded in _CORNERS
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _is_boundary(x: float, y: float) -> bool:
    """True when the point has an exact 0.0/1.0 coordinate, i.e. lies on the
    square's edge. Callers pass pre-rounded values, so exact equality is OK."""
    on_vertical_edge = x in _BOUNDARY_X
    on_horizontal_edge = y in _BOUNDARY_Y
    return on_vertical_edge or on_horizontal_edge
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def format_target_for_prompt(target: dict) -> str:
    """Render the target's mountain/valley creases as one human-readable
    line each ("<Kind> fold: (x1, y1) -> (x2, y2)"), skipping boundary and
    unassigned edges. Coordinates are rounded to 4 decimals."""
    coords = target["vertices_coords"]

    rendered = []
    for (a, b), kind in zip(target["edges_vertices"], target["edges_assignment"]):
        if kind not in ("M", "V"):
            continue  # boundary / flat edges are not part of the fold plan
        ax, ay = coords[a]
        bx, by = coords[b]
        name = "Mountain" if kind == "M" else "Valley"
        rendered.append(
            f"{name} fold: ({round(ax, 4)}, {round(ay, 4)}) -> ({round(bx, 4)}, {round(by, 4)})"
        )
    return "\n".join(rendered)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def format_anchor_points(paper_state) -> str:
    """Group the state's anchor points into corners / boundary points /
    interior intersections and render one indented line per non-empty group.
    Coordinates are rounded to 4 decimals before classification."""
    buckets: dict[str, list[tuple[float, float]]] = {
        "Corners": [],
        "Boundary pts": [],
        "Intersections": [],
    }

    for x, y in paper_state.anchor_points():
        pt = (round(x, 4), round(y, 4))
        if _is_corner(*pt):
            buckets["Corners"].append(pt)
        elif _is_boundary(*pt):
            buckets["Boundary pts"].append(pt)
        else:
            buckets["Intersections"].append(pt)

    rendered = []
    for label, pts in buckets.items():
        if pts:
            joined = " ".join(f"({px},{py})" for px, py in pts)
            rendered.append(f" {label}: {joined}")
    return "\n".join(rendered)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def format_crease_history(paper_state) -> str:
    """Render the state's fold history as a 1-based numbered list, one fold
    per line, or the literal string 'none' when no folds were applied."""
    folds = paper_state.fold_history
    if not folds:
        return "none"

    rendered = []
    for num, fold in enumerate(folds, 1):
        name = "Mountain" if fold["assignment"] == "M" else "Valley"
        (ax, ay), (bx, by) = fold["p1"], fold["p2"]
        rendered.append(
            f" {num}. {name} fold: ({round(ax, 4)}, {round(ay, 4)}) -> ({round(bx, 4)}, {round(by, 4)})"
        )
    return "\n".join(rendered)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def format_reward_feedback(reward: Optional[dict]) -> str:
    """Render a reward dict as space-separated 'key=value' pairs (2 decimal
    places), known keys first in a fixed order, then any extras in dict
    order. Returns a placeholder for None / empty input."""
    if not reward:
        return "(no feedback yet)"

    known = ["kawasaki", "maekawa", "blb", "progress", "economy", "total"]
    ordered = [k for k in known if k in reward]
    ordered.extend(k for k in reward if k not in known)
    parts = [f"{k}={reward[k]:.2f}" for k in ordered]
    return " " + " ".join(parts)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def code_as_policy_prompt(target: dict, max_folds: int = 8) -> str:
    """Build the one-shot 'code as policy' prompt: shows the full target
    crease pattern plus the flat-foldability rules and asks the model for
    an entire fold sequence (at most *max_folds* folds) as JSON wrapped in
    <folds> tags. The returned string is the complete prompt."""
    formatted_target = format_target_for_prompt(target)
    return f"""You are an origami designer. Generate a fold sequence for a unit square [0,1]x[0,1].

TARGET CREASE PATTERN:
{formatted_target}

RULES (must hold at every interior vertex):
- Kawasaki: alternating sector angles sum equally (each half = 180 degrees)
- Maekawa: |mountain_count - valley_count| = 2
- Big-Little-Big: folds bounding the smallest sector must have opposite types (one M, one V)

INITIAL ANCHOR POINTS (valid fold endpoints — new ones appear when creases intersect):
Corners: (0.0,0.0) (1.0,0.0) (1.0,1.0) (0.0,1.0)
Midpoints: (0.0,0.5) (0.5,0.0) (1.0,0.5) (0.5,1.0)
Note: new anchor points are created at crease intersections.

Output at most {max_folds} folds. Both endpoints must be valid anchor points.
Output ONLY the JSON list, wrapped in <folds> tags:

<folds>
[
{{"instruction": "Describe the fold in plain English", "from": [x1, y1], "to": [x2, y2], "assignment": "V"}},
{{"instruction": "...", "from": [x1, y1], "to": [x2, y2], "assignment": "M"}}
]
</folds>"""
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def step_level_prompt(
    target: dict,
    paper_state,
    step: int,
    max_steps: int,
    last_reward: Optional[dict] = None,
) -> str:
    """Build the per-step prompt for incremental crease placement: shows the
    target pattern, the creases placed so far, the currently available anchor
    points, and the previous step's reward feedback, then asks for exactly
    ONE next fold as a bare JSON object (no <folds> wrapper)."""
    formatted_target = format_target_for_prompt(target)
    formatted_history = format_crease_history(paper_state)
    formatted_anchors = format_anchor_points(paper_state)
    formatted_reward = format_reward_feedback(last_reward)

    return f"""You are an origami designer building a crease pattern step by step.

TARGET:
{formatted_target}

CURRENT STATE (step {step} of {max_steps}):
Creases placed:
{formatted_history}

AVAILABLE ANCHOR POINTS:
{formatted_anchors}

LAST REWARD:
{formatted_reward}

Add the NEXT crease. Both endpoints must be listed anchor points above.
Output ONLY valid JSON (no extra text):
{{"instruction": "...", "from": [x1, y1], "to": [x2, y2], "assignment": "M" or "V"}}"""
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def parse_fold_list(completion: str) -> list[dict]:
    """
    Extract and validate a JSON fold list from an LLM completion.

    The completion must contain a JSON list wrapped in <folds>...</folds>
    tags. Each entry needs 'from'/'to' (2-element numeric lists) and
    'assignment' (a string); 'instruction' is optional and defaults to "".

    Returns:
        Normalized fold dicts with float coordinates.

    Raises:
        ValueError: missing tags, malformed JSON, wrong top-level type, or
            any invalid fold entry (with the offending index in the message).
    """
    match = re.search(r"<folds>(.*?)</folds>", completion, re.IGNORECASE | re.DOTALL)
    if not match:
        raise ValueError("No <folds>...</folds> tags found in completion")

    raw = match.group(1).strip()

    try:
        data = json.loads(raw)
    except json.JSONDecodeError as e:
        raise ValueError(f"Failed to parse JSON inside <folds> tags: {e}") from e

    if not isinstance(data, list):
        raise ValueError(f"Expected a JSON list inside <folds> tags, got {type(data).__name__}")

    def _is_point(pt) -> bool:
        # bool is a subclass of int, so reject it explicitly: JSON
        # true/false must not silently become coordinates 1.0/0.0.
        return (
            isinstance(pt, list)
            and len(pt) == 2
            and all(isinstance(v, (int, float)) and not isinstance(v, bool) for v in pt)
        )

    cleaned = []
    for i, item in enumerate(data):
        if not isinstance(item, dict):
            raise ValueError(f"Fold {i} is not a dict: {item!r}")

        for field in ("from", "to", "assignment"):
            if field not in item:
                raise ValueError(f"Fold {i} missing required field '{field}'")

        from_pt = item["from"]
        to_pt = item["to"]

        if not _is_point(from_pt):
            raise ValueError(f"Fold {i} 'from' must be a list of 2 numbers, got {from_pt!r}")

        if not _is_point(to_pt):
            raise ValueError(f"Fold {i} 'to' must be a list of 2 numbers, got {to_pt!r}")

        if not isinstance(item["assignment"], str):
            raise ValueError(f"Fold {i} 'assignment' must be a string")

        cleaned.append(
            {
                "from": [float(from_pt[0]), float(from_pt[1])],
                "to": [float(to_pt[0]), float(to_pt[1])],
                "assignment": item["assignment"],
                "instruction": item.get("instruction", ""),
            }
        )

    return cleaned
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def parse_single_fold(completion: str) -> dict:
    """Extract the outermost {...} span from a completion, parse it as JSON,
    and verify it is an object carrying the required fold fields.

    Raises:
        ValueError: no braces found, malformed JSON, wrong type, or a
            missing 'from'/'to'/'assignment' field.
    """
    start = completion.find("{")
    end = completion.rfind("}")

    if start == -1 or end == -1 or end <= start:
        raise ValueError("No JSON object found in completion")

    raw = completion[start : end + 1]

    try:
        fold = json.loads(raw)
    except json.JSONDecodeError as e:
        raise ValueError(f"Failed to parse JSON from completion: {e}") from e

    if not isinstance(fold, dict):
        raise ValueError(f"Expected a JSON object, got {type(fold).__name__}")

    missing = [name for name in ("from", "to", "assignment") if name not in fold]
    if missing:
        raise ValueError(f"Missing required field '{missing[0]}' in fold JSON")

    return fold
|
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from .verifier import check_all_vertices, geometric_crease_coverage
|
| 3 |
+
from .paper_state import PaperState
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def load_target(target_path: str) -> dict:
    """Read a FOLD-format target file from disk and return the parsed JSON dict."""
    with open(target_path) as handle:
        parsed = json.load(handle)
    return parsed
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def target_crease_edges(target: dict) -> list[dict]:
    """
    Extract crease edges from a FOLD target dict as list of
    {'v1': (x1,y1), 'v2': (x2,y2), 'assignment': 'M'|'V'} dicts.
    Boundary ('B') and other assignments are filtered out.
    """
    coords = target['vertices_coords']
    # Index edges_assignment positionally so a length mismatch still raises,
    # exactly as the straightforward loop would.
    return [
        {
            'v1': tuple(coords[a_idx]),
            'v2': tuple(coords[b_idx]),
            'assignment': target['edges_assignment'][i],
        }
        for i, (a_idx, b_idx) in enumerate(target['edges_vertices'])
        if target['edges_assignment'][i] in ('M', 'V')
    ]
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def compute_reward(
    state: PaperState,
    action_result: dict,
    target: dict,
) -> dict:
    """
    Score a fold action against the target pattern.

    Args:
        state: PaperState AFTER the action was applied.
        action_result: {'valid': bool, 'anchored': bool, 'new_vertices': list,
            'errors': list} as returned by PaperState.add_crease.
        target: FOLD target dict.

    Returns:
        Dict with component scores and a weighted 'total':
        format, anchored, kawasaki, maekawa, blb, progress, economy,
        completion, efficiency, total. An invalid action short-circuits with
        only {'format': 0.0, 'total': -0.1}.
    """
    reward = {}

    # Gate 1: format — an action that failed to parse/apply gets a flat penalty.
    parsed_ok = action_result.get('valid', False)
    reward['format'] = 1.0 if parsed_ok else 0.0
    if not parsed_ok:
        reward['total'] = -0.1
        return reward

    # Gate 2: anchoring — partial credit when endpoints were not anchor points.
    reward['anchored'] = 1.0 if action_result.get('anchored', False) else 0.3

    # Flat-foldability checks aggregated over all interior vertices.
    vertex_scores = check_all_vertices(state.graph)
    for theorem in ('kawasaki', 'maekawa', 'blb'):
        reward[theorem] = vertex_scores[theorem]

    # Geometric overlap between placed creases and the target pattern.
    coverage, economy = geometric_crease_coverage(state, target_crease_edges(target))
    reward['progress'] = coverage
    reward['economy'] = economy

    # Completion bonus: near-full coverage with every theorem satisfied.
    theorems_hold = all(reward[t] == 1.0 for t in ('kawasaki', 'maekawa', 'blb'))
    reward['completion'] = 10.0 if (reward['progress'] > 0.9 and theorems_hold) else 0.0

    # Small per-step cost discourages wasted folds.
    reward['efficiency'] = -0.01

    # Weighted total (completion and efficiency enter unweighted).
    weights = (
        ('anchored', 0.05),
        ('kawasaki', 0.08),
        ('maekawa', 0.07),
        ('blb', 0.05),
        ('progress', 0.45),
        ('economy', 0.10),
    )
    reward['total'] = (
        sum(w * reward[key] for key, w in weights)
        + reward['completion']
        + reward['efficiency']
    )
    return reward
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def compute_terminal_reward(state: PaperState, target: dict) -> dict:
    """Score the final state of a completed fold sequence.

    Delegates to compute_reward with a synthetic 'successful action' result so
    that only the state-dependent components (theorems, coverage) matter.
    """
    synthetic_result = {'valid': True, 'anchored': True, 'new_vertices': [], 'errors': []}
    return compute_reward(state, synthetic_result, target)
|
|
File without changes
|
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0],
|
| 7 |
+
[0.0, 0.25],
|
| 8 |
+
[1.0, 0.25],
|
| 9 |
+
[0.0, 0.5],
|
| 10 |
+
[1.0, 0.5],
|
| 11 |
+
[0.0, 0.75],
|
| 12 |
+
[1.0, 0.75]
|
| 13 |
+
],
|
| 14 |
+
"edges_vertices": [
|
| 15 |
+
[0, 1],
|
| 16 |
+
[1, 5],
|
| 17 |
+
[5, 7],
|
| 18 |
+
[7, 9],
|
| 19 |
+
[9, 2],
|
| 20 |
+
[2, 3],
|
| 21 |
+
[3, 8],
|
| 22 |
+
[8, 6],
|
| 23 |
+
[6, 4],
|
| 24 |
+
[4, 0],
|
| 25 |
+
[4, 5],
|
| 26 |
+
[6, 7],
|
| 27 |
+
[8, 9]
|
| 28 |
+
],
|
| 29 |
+
"edges_assignment": [
|
| 30 |
+
"B",
|
| 31 |
+
"B",
|
| 32 |
+
"B",
|
| 33 |
+
"B",
|
| 34 |
+
"B",
|
| 35 |
+
"B",
|
| 36 |
+
"B",
|
| 37 |
+
"B",
|
| 38 |
+
"B",
|
| 39 |
+
"B",
|
| 40 |
+
"V",
|
| 41 |
+
"M",
|
| 42 |
+
"V"
|
| 43 |
+
],
|
| 44 |
+
"edges_foldAngle": [
|
| 45 |
+
0,
|
| 46 |
+
0,
|
| 47 |
+
0,
|
| 48 |
+
0,
|
| 49 |
+
0,
|
| 50 |
+
0,
|
| 51 |
+
0,
|
| 52 |
+
0,
|
| 53 |
+
0,
|
| 54 |
+
0,
|
| 55 |
+
-180,
|
| 56 |
+
-180,
|
| 57 |
+
-180
|
| 58 |
+
],
|
| 59 |
+
"faces_vertices": [
|
| 60 |
+
[0, 1, 5, 4],
|
| 61 |
+
[4, 5, 7, 6],
|
| 62 |
+
[6, 7, 9, 8],
|
| 63 |
+
[8, 9, 2, 3]
|
| 64 |
+
],
|
| 65 |
+
"level": 3,
|
| 66 |
+
"description": "Three alternating horizontal folds at y=0.25 (valley), y=0.5 (mountain), y=0.75 (valley) forming an accordion"
|
| 67 |
+
}
|
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0],
|
| 7 |
+
[0.0, 0.2],
|
| 8 |
+
[1.0, 0.2],
|
| 9 |
+
[0.0, 0.4],
|
| 10 |
+
[1.0, 0.4],
|
| 11 |
+
[0.0, 0.6],
|
| 12 |
+
[1.0, 0.6],
|
| 13 |
+
[0.0, 0.8],
|
| 14 |
+
[1.0, 0.8]
|
| 15 |
+
],
|
| 16 |
+
"edges_vertices": [
|
| 17 |
+
[0, 1],
|
| 18 |
+
[1, 5],
|
| 19 |
+
[5, 7],
|
| 20 |
+
[7, 9],
|
| 21 |
+
[9, 11],
|
| 22 |
+
[11, 2],
|
| 23 |
+
[2, 3],
|
| 24 |
+
[3, 10],
|
| 25 |
+
[10, 8],
|
| 26 |
+
[8, 6],
|
| 27 |
+
[6, 4],
|
| 28 |
+
[4, 0],
|
| 29 |
+
[4, 5],
|
| 30 |
+
[6, 7],
|
| 31 |
+
[8, 9],
|
| 32 |
+
[10, 11]
|
| 33 |
+
],
|
| 34 |
+
"edges_assignment": [
|
| 35 |
+
"B",
|
| 36 |
+
"B",
|
| 37 |
+
"B",
|
| 38 |
+
"B",
|
| 39 |
+
"B",
|
| 40 |
+
"B",
|
| 41 |
+
"B",
|
| 42 |
+
"B",
|
| 43 |
+
"B",
|
| 44 |
+
"B",
|
| 45 |
+
"B",
|
| 46 |
+
"B",
|
| 47 |
+
"V",
|
| 48 |
+
"M",
|
| 49 |
+
"V",
|
| 50 |
+
"M"
|
| 51 |
+
],
|
| 52 |
+
"edges_foldAngle": [
|
| 53 |
+
0,
|
| 54 |
+
0,
|
| 55 |
+
0,
|
| 56 |
+
0,
|
| 57 |
+
0,
|
| 58 |
+
0,
|
| 59 |
+
0,
|
| 60 |
+
0,
|
| 61 |
+
0,
|
| 62 |
+
0,
|
| 63 |
+
0,
|
| 64 |
+
0,
|
| 65 |
+
-180,
|
| 66 |
+
-180,
|
| 67 |
+
-180,
|
| 68 |
+
-180
|
| 69 |
+
],
|
| 70 |
+
"faces_vertices": [
|
| 71 |
+
[0, 1, 5, 4],
|
| 72 |
+
[4, 5, 7, 6],
|
| 73 |
+
[6, 7, 9, 8],
|
| 74 |
+
[8, 9, 11, 10],
|
| 75 |
+
[10, 11, 2, 3]
|
| 76 |
+
],
|
| 77 |
+
"level": 3,
|
| 78 |
+
"description": "Four alternating horizontal folds at y=0.2 (valley), y=0.4 (mountain), y=0.6 (valley), y=0.8 (mountain) forming an accordion"
|
| 79 |
+
}
|
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0]
|
| 7 |
+
],
|
| 8 |
+
"edges_vertices": [
|
| 9 |
+
[0, 1],
|
| 10 |
+
[1, 2],
|
| 11 |
+
[2, 3],
|
| 12 |
+
[3, 0],
|
| 13 |
+
[1, 3]
|
| 14 |
+
],
|
| 15 |
+
"edges_assignment": [
|
| 16 |
+
"B",
|
| 17 |
+
"B",
|
| 18 |
+
"B",
|
| 19 |
+
"B",
|
| 20 |
+
"M"
|
| 21 |
+
],
|
| 22 |
+
"edges_foldAngle": [
|
| 23 |
+
0,
|
| 24 |
+
0,
|
| 25 |
+
0,
|
| 26 |
+
0,
|
| 27 |
+
-180
|
| 28 |
+
],
|
| 29 |
+
"faces_vertices": [
|
| 30 |
+
[0, 1, 3],
|
| 31 |
+
[1, 2, 3]
|
| 32 |
+
],
|
| 33 |
+
"level": 1,
|
| 34 |
+
"description": "One mountain fold along the anti-diagonal from (1,0) to (0,1)"
|
| 35 |
+
}
|
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0]
|
| 7 |
+
],
|
| 8 |
+
"edges_vertices": [
|
| 9 |
+
[0, 1],
|
| 10 |
+
[1, 2],
|
| 11 |
+
[2, 3],
|
| 12 |
+
[3, 0],
|
| 13 |
+
[0, 2]
|
| 14 |
+
],
|
| 15 |
+
"edges_assignment": [
|
| 16 |
+
"B",
|
| 17 |
+
"B",
|
| 18 |
+
"B",
|
| 19 |
+
"B",
|
| 20 |
+
"V"
|
| 21 |
+
],
|
| 22 |
+
"edges_foldAngle": [
|
| 23 |
+
0,
|
| 24 |
+
0,
|
| 25 |
+
0,
|
| 26 |
+
0,
|
| 27 |
+
-180
|
| 28 |
+
],
|
| 29 |
+
"faces_vertices": [
|
| 30 |
+
[0, 1, 2],
|
| 31 |
+
[0, 2, 3]
|
| 32 |
+
],
|
| 33 |
+
"level": 1,
|
| 34 |
+
"description": "One valley fold along the main diagonal from (0,0) to (1,1)"
|
| 35 |
+
}
|
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0],
|
| 7 |
+
[0.0, 0.5],
|
| 8 |
+
[1.0, 0.5]
|
| 9 |
+
],
|
| 10 |
+
"edges_vertices": [
|
| 11 |
+
[0, 1],
|
| 12 |
+
[1, 5],
|
| 13 |
+
[5, 2],
|
| 14 |
+
[2, 3],
|
| 15 |
+
[3, 4],
|
| 16 |
+
[4, 0],
|
| 17 |
+
[4, 5]
|
| 18 |
+
],
|
| 19 |
+
"edges_assignment": [
|
| 20 |
+
"B",
|
| 21 |
+
"B",
|
| 22 |
+
"B",
|
| 23 |
+
"B",
|
| 24 |
+
"B",
|
| 25 |
+
"B",
|
| 26 |
+
"V"
|
| 27 |
+
],
|
| 28 |
+
"edges_foldAngle": [
|
| 29 |
+
0,
|
| 30 |
+
0,
|
| 31 |
+
0,
|
| 32 |
+
0,
|
| 33 |
+
0,
|
| 34 |
+
0,
|
| 35 |
+
-180
|
| 36 |
+
],
|
| 37 |
+
"faces_vertices": [
|
| 38 |
+
[0, 1, 5, 4],
|
| 39 |
+
[4, 5, 2, 3]
|
| 40 |
+
],
|
| 41 |
+
"level": 1,
|
| 42 |
+
"description": "One valley fold along y=0.5, folding the paper in half horizontally"
|
| 43 |
+
}
|
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0],
|
| 7 |
+
[0.5, 0.0],
|
| 8 |
+
[0.5, 1.0]
|
| 9 |
+
],
|
| 10 |
+
"edges_vertices": [
|
| 11 |
+
[0, 4],
|
| 12 |
+
[4, 1],
|
| 13 |
+
[1, 2],
|
| 14 |
+
[2, 5],
|
| 15 |
+
[5, 3],
|
| 16 |
+
[3, 0],
|
| 17 |
+
[4, 5]
|
| 18 |
+
],
|
| 19 |
+
"edges_assignment": [
|
| 20 |
+
"B",
|
| 21 |
+
"B",
|
| 22 |
+
"B",
|
| 23 |
+
"B",
|
| 24 |
+
"B",
|
| 25 |
+
"B",
|
| 26 |
+
"M"
|
| 27 |
+
],
|
| 28 |
+
"edges_foldAngle": [
|
| 29 |
+
0,
|
| 30 |
+
0,
|
| 31 |
+
0,
|
| 32 |
+
0,
|
| 33 |
+
0,
|
| 34 |
+
0,
|
| 35 |
+
-180
|
| 36 |
+
],
|
| 37 |
+
"faces_vertices": [
|
| 38 |
+
[0, 4, 5, 3],
|
| 39 |
+
[4, 1, 2, 5]
|
| 40 |
+
],
|
| 41 |
+
"level": 1,
|
| 42 |
+
"description": "One mountain fold along x=0.5, folding the paper in half vertically"
|
| 43 |
+
}
|
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0],
|
| 7 |
+
[0.0, 0.3333333333333333],
|
| 8 |
+
[1.0, 0.3333333333333333],
|
| 9 |
+
[0.0, 0.6666666666666666],
|
| 10 |
+
[1.0, 0.6666666666666666]
|
| 11 |
+
],
|
| 12 |
+
"edges_vertices": [
|
| 13 |
+
[0, 1],
|
| 14 |
+
[1, 5],
|
| 15 |
+
[5, 7],
|
| 16 |
+
[7, 2],
|
| 17 |
+
[2, 3],
|
| 18 |
+
[3, 6],
|
| 19 |
+
[6, 4],
|
| 20 |
+
[4, 0],
|
| 21 |
+
[4, 5],
|
| 22 |
+
[6, 7]
|
| 23 |
+
],
|
| 24 |
+
"edges_assignment": [
|
| 25 |
+
"B",
|
| 26 |
+
"B",
|
| 27 |
+
"B",
|
| 28 |
+
"B",
|
| 29 |
+
"B",
|
| 30 |
+
"B",
|
| 31 |
+
"B",
|
| 32 |
+
"B",
|
| 33 |
+
"V",
|
| 34 |
+
"V"
|
| 35 |
+
],
|
| 36 |
+
"edges_foldAngle": [
|
| 37 |
+
0,
|
| 38 |
+
0,
|
| 39 |
+
0,
|
| 40 |
+
0,
|
| 41 |
+
0,
|
| 42 |
+
0,
|
| 43 |
+
0,
|
| 44 |
+
0,
|
| 45 |
+
-180,
|
| 46 |
+
-180
|
| 47 |
+
],
|
| 48 |
+
"faces_vertices": [
|
| 49 |
+
[0, 1, 5, 4],
|
| 50 |
+
[4, 5, 7, 6],
|
| 51 |
+
[6, 7, 2, 3]
|
| 52 |
+
],
|
| 53 |
+
"level": 2,
|
| 54 |
+
"description": "Two parallel valley folds at y=1/3 and y=2/3, dividing the paper into horizontal thirds"
|
| 55 |
+
}
|
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0],
|
| 7 |
+
[0.3333333333333333, 0.0],
|
| 8 |
+
[0.6666666666666666, 0.0],
|
| 9 |
+
[0.3333333333333333, 1.0],
|
| 10 |
+
[0.6666666666666666, 1.0]
|
| 11 |
+
],
|
| 12 |
+
"edges_vertices": [
|
| 13 |
+
[0, 4],
|
| 14 |
+
[4, 5],
|
| 15 |
+
[5, 1],
|
| 16 |
+
[1, 2],
|
| 17 |
+
[2, 7],
|
| 18 |
+
[7, 6],
|
| 19 |
+
[6, 3],
|
| 20 |
+
[3, 0],
|
| 21 |
+
[4, 6],
|
| 22 |
+
[5, 7]
|
| 23 |
+
],
|
| 24 |
+
"edges_assignment": [
|
| 25 |
+
"B",
|
| 26 |
+
"B",
|
| 27 |
+
"B",
|
| 28 |
+
"B",
|
| 29 |
+
"B",
|
| 30 |
+
"B",
|
| 31 |
+
"B",
|
| 32 |
+
"B",
|
| 33 |
+
"M",
|
| 34 |
+
"M"
|
| 35 |
+
],
|
| 36 |
+
"edges_foldAngle": [
|
| 37 |
+
0,
|
| 38 |
+
0,
|
| 39 |
+
0,
|
| 40 |
+
0,
|
| 41 |
+
0,
|
| 42 |
+
0,
|
| 43 |
+
0,
|
| 44 |
+
0,
|
| 45 |
+
-180,
|
| 46 |
+
-180
|
| 47 |
+
],
|
| 48 |
+
"faces_vertices": [
|
| 49 |
+
[0, 4, 6, 3],
|
| 50 |
+
[4, 5, 7, 6],
|
| 51 |
+
[5, 1, 2, 7]
|
| 52 |
+
],
|
| 53 |
+
"level": 2,
|
| 54 |
+
"description": "Two parallel mountain folds at x=1/3 and x=2/3, dividing the paper into vertical thirds"
|
| 55 |
+
}
|
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Validates all .fold target files against origami theorems.
|
| 3 |
+
Run directly: python -m env.targets.validator
|
| 4 |
+
"""
|
| 5 |
+
import json
|
| 6 |
+
import os
|
| 7 |
+
import sys
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
|
| 10 |
+
from ..graph import CreaseGraph
|
| 11 |
+
from ..verifier import check_kawasaki_at_vertex, check_maekawa_at_vertex, check_blb_at_vertex
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def build_graph_from_fold(fold_data: dict) -> CreaseGraph:
    """
    Reconstruct a CreaseGraph from a FOLD JSON dict.
    Used to validate target files.
    """
    graph = CreaseGraph()

    coords = fold_data['vertices_coords']
    edge_pairs = fold_data['edges_vertices']
    edge_assignments = fold_data['edges_assignment']

    # Translate file vertex indices into graph vertex IDs.
    id_of = {
        idx: graph.add_vertex(float(x), float(y))
        for idx, (x, y) in enumerate(coords)
    }

    # Insert every edge; boundary edges created during graph init are
    # deduplicated inside add_edge.
    for assignment, (a, b) in zip(edge_assignments, edge_pairs):
        graph.add_edge(id_of[a], id_of[b], assignment)

    return graph
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def validate_target(fold_path: str) -> dict:
    """
    Validate a single .fold target file.
    Returns {'file': str, 'valid': bool, 'issues': list[str], 'interior_vertices': int}
    """
    with open(fold_path) as fh:
        data = json.load(fh)

    problems = []

    # Structural sanity: the file must carry the core FOLD arrays.
    for key in ('vertices_coords', 'edges_vertices', 'edges_assignment', 'edges_foldAngle'):
        if key not in data:
            problems.append(f"Missing field: {key}")

    if problems:
        return {'file': os.path.basename(fold_path), 'valid': False, 'issues': problems, 'interior_vertices': -1}

    edge_count = len(data['edges_vertices'])
    if len(data['edges_assignment']) != edge_count:
        problems.append("edges_assignment length mismatch")
    if len(data['edges_foldAngle']) != edge_count:
        problems.append("edges_foldAngle length mismatch")

    # Rebuild the crease graph and run the per-vertex origami theorems.
    graph = build_graph_from_fold(data)
    interior = graph.interior_vertices()

    for vid in interior:
        kaw_ok, alt_sum = check_kawasaki_at_vertex(vid, graph)
        if not kaw_ok:
            problems.append(f"Kawasaki violated at vertex {vid} (alt_sum={alt_sum:.6f})")

        if not check_maekawa_at_vertex(vid, graph):
            problems.append(f"Maekawa violated at vertex {vid}")

        blb = check_blb_at_vertex(vid, graph)
        if blb:
            problems.append(f"BLB violated at vertex {vid}: {blb}")

    return {
        'file': os.path.basename(fold_path),
        'valid': len(problems) == 0,
        'issues': problems,
        'interior_vertices': len(interior),
    }
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def validate_all(targets_dir: str = None) -> bool:
    """Validate all .fold files in the targets directory. Returns True if all pass."""
    directory = Path(targets_dir) if targets_dir is not None else Path(__file__).parent

    fold_paths = sorted(directory.glob('*.fold'))
    if not fold_paths:
        print("No .fold files found")
        return False

    all_ok = True
    for path in fold_paths:
        report = validate_target(str(path))
        tag = "OK" if report['valid'] else "FAIL"
        print(f"  [{tag}] {report['file']} — {report['interior_vertices']} interior vertices")
        for issue in report['issues']:
            print(f"    ! {issue}")
        all_ok = all_ok and report['valid']

    return all_ok
| 114 |
+
|
| 115 |
+
|
| 116 |
+
# CLI entry point: exit 0 when every target validates, 1 otherwise,
# so CI pipelines can gate on the result.
if __name__ == '__main__':
    print("Validating targets...")
    ok = validate_all()
    sys.exit(0 if ok else 1)
|
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Quick standalone sanity check for the .fold target files.

Unlike validator.py, this only checks array lengths and vertex-index
bounds; it does not evaluate the origami theorems.
"""
import json
import os
import sys

# Resolve the targets directory relative to this file so the script runs on
# any machine (previously a hard-coded absolute path under /Users/ianalin/).
targets_dir = os.path.dirname(os.path.abspath(__file__))

# sorted() makes the report order deterministic across filesystems.
for fname in sorted(os.listdir(targets_dir)):
    if not fname.endswith(".fold"):
        continue
    with open(os.path.join(targets_dir, fname)) as f:
        d = json.load(f)
    n_v = len(d["vertices_coords"])
    n_e = len(d["edges_vertices"])
    assert len(d["edges_assignment"]) == n_e, f"{fname}: assignment length mismatch"
    assert len(d["edges_foldAngle"]) == n_e, f"{fname}: foldAngle length mismatch"
    for e in d["edges_vertices"]:
        assert e[0] < n_v and e[1] < n_v, f"{fname}: edge references invalid vertex"
    # faces_vertices is optional in the FOLD spec; don't crash when absent.
    for face in d.get("faces_vertices", []):
        for vi in face:
            assert vi < n_v, f"{fname}: face references invalid vertex"
    creases = [i for i, a in enumerate(d["edges_assignment"]) if a in ('M', 'V')]
    print(f"{fname}: {n_v} vertices, {n_e} edges, {len(creases)} creases, level={d.get('level','?')} OK")
|
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from .graph import CreaseGraph
|
| 3 |
+
from .paper_state import PaperState
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def _compute_sector_angles(vertex_id: int, graph: CreaseGraph) -> list[float]:
    """Compute consecutive sector angles (CCW) at a vertex from its cyclic edges."""
    ordered = graph.get_cyclic_edges(vertex_id)
    vx, vy = graph.vertices[vertex_id]

    # Absolute direction of each incident edge, in the graph's cyclic order.
    directions = []
    for eid in ordered:
        a, b, _ = graph.edges[eid]
        neighbor = b if a == vertex_id else a
        px, py = graph.vertices[neighbor]
        directions.append(np.arctan2(py - vy, px - vx))

    # Wedge between each direction and the next, normalized into [0, 2π).
    count = len(ordered)
    wedges = []
    for i, theta in enumerate(directions):
        gap = directions[(i + 1) % count] - theta
        if gap < 0:
            gap += 2 * np.pi
        if gap > 2 * np.pi:
            gap -= 2 * np.pi
        wedges.append(gap)

    return wedges
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def check_kawasaki_at_vertex(vertex_id: int, graph: CreaseGraph) -> tuple[bool, float]:
    """
    Checks Kawasaki-Justin theorem at a single vertex.

    Kawasaki: at an interior vertex with 2n creases, the alternating sum
    of consecutive sector angles = 0.
    Equivalently: sum(odd-indexed sectors) == sum(even-indexed sectors) == π.

    Returns (satisfied: bool, |alternating_sum|: float).
    Returns (True, 0.0) for vertices with degree < 4 (not an interior fold vertex yet).
    Returns (False, inf) for odd-degree vertices (impossible for flat folds).
    """
    degree = len(graph.get_cyclic_edges(vertex_id))

    # An odd number of creases can never fold flat.
    if degree % 2 != 0:
        return (False, float('inf'))

    # Fewer than 4 edges: the vertex is not an active fold vertex yet.
    if degree < 4:
        return (True, 0.0)

    sectors = _compute_sector_angles(vertex_id, graph)
    signed_total = 0.0
    for i, s in enumerate(sectors):
        signed_total += s if i % 2 == 0 else -s
    err = abs(signed_total)
    return (err < 1e-9, err)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def check_maekawa_at_vertex(vertex_id: int, graph: CreaseGraph) -> bool:
    """
    Checks Maekawa-Justin theorem at a single vertex.

    Maekawa: |M - V| == 2 where M, V are counts of mountain/valley fold edges
    at the vertex. BOUNDARY edges ('B') are NOT counted.

    Returns True if satisfied or if vertex has fewer than 4 fold edges (not yet active).
    """
    assignments = [graph.edges[eid][2] for eid in graph.vertex_edges[vertex_id]]
    mountains = assignments.count('M')
    valleys = assignments.count('V')

    # With fewer than 4 creases the vertex is not an active fold vertex.
    if mountains + valleys < 4:
        return True

    return abs(mountains - valleys) == 2
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def check_blb_at_vertex(vertex_id: int, graph: CreaseGraph) -> list[tuple[int, int]]:
    """
    Checks Big-Little-Big lemma at a single vertex.

    BLB: if sector angle i is a strict local minimum (smaller than both neighbors),
    the fold edges bounding that sector must have OPPOSITE MV assignments.

    Returns list of (edge_a_id, edge_b_id) pairs where BLB is violated.
    Empty list = no violations.
    """
    ordered = graph.get_cyclic_edges(vertex_id)
    count = len(ordered)

    if count < 4:
        return []

    sectors = _compute_sector_angles(vertex_id, graph)
    bad_pairs = []

    for i, sector in enumerate(sectors):
        # Only strict local minima trigger the lemma.
        if not (sector < sectors[(i - 1) % count] and sector < sectors[(i + 1) % count]):
            continue

        left = ordered[i]
        right = ordered[(i + 1) % count]
        left_assign = graph.edges[left][2]
        right_assign = graph.edges[right][2]

        # Both bounding edges are creases with the same MV sign -> violation.
        if left_assign in ('M', 'V') and right_assign in ('M', 'V'):
            if left_assign == right_assign:
                bad_pairs.append((left, right))

    return bad_pairs
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def _angle_diff(a1: float, a2: float) -> float:
    """Minimum angle difference between two directed lines (considering 180° symmetry)."""
    wrapped = abs(a1 - a2) % np.pi
    complement = np.pi - wrapped
    return wrapped if wrapped < complement else complement
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def geometric_crease_coverage(
    state: PaperState,
    target_edges: list[dict],
    tol_pos: float = 0.05,
    tol_angle_deg: float = 5.0,
) -> tuple[float, float]:
    """
    Computes how well the current crease pattern matches the target.

    Args:
        target_edges: list of {'v1': (x1,y1), 'v2': (x2,y2), 'assignment': 'M'|'V'}

    Returns:
        (coverage, economy)
        coverage: fraction of target creases matched [0, 1]
        economy: penalty for excess creases [0, 1], 1.0 = no excess
    """
    existing = state.crease_edges()
    angle_tol = np.deg2rad(tol_angle_deg)

    def _mid_and_angle(edge: dict) -> tuple[tuple[float, float], float]:
        # Midpoint and direction of a {'v1': .., 'v2': ..} edge record.
        x1, y1 = edge['v1']
        x2, y2 = edge['v2']
        return ((x1 + x2) / 2.0, (y1 + y2) / 2.0), np.arctan2(y2 - y1, x2 - x1)

    hits = 0
    for tgt in target_edges:
        t_mid, t_ang = _mid_and_angle(tgt)

        for cur in existing:
            c_mid, c_ang = _mid_and_angle(cur)

            near = np.hypot(c_mid[0] - t_mid[0], c_mid[1] - t_mid[1]) <= tol_pos
            aligned = _angle_diff(c_ang, t_ang) <= angle_tol

            # First current crease close enough in position and direction
            # counts as the match for this target crease.
            if near and aligned:
                hits += 1
                break

    denom = max(len(target_edges), 1)
    coverage = hits / denom
    surplus = max(0, len(existing) - len(target_edges))
    economy = max(0.0, 1.0 - surplus / denom)
    return (coverage, economy)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def check_all_vertices(graph: CreaseGraph) -> dict:
    """
    Run all vertex-level checks on every interior vertex.

    Returns dict with:
        'kawasaki': float         # fraction of interior vertices passing Kawasaki [0,1]
        'maekawa': float          # fraction passing Maekawa [0,1]
        'blb': float              # fraction with no BLB violations [0,1]
        'n_interior': int         # number of interior vertices checked
        'per_vertex': list[dict]  # per-vertex details
    """
    interior = graph.interior_vertices()

    # With no interior vertices every theorem is vacuously satisfied.
    if not interior:
        return {
            'kawasaki': 1.0,
            'maekawa': 1.0,
            'blb': 1.0,
            'n_interior': 0,
            'per_vertex': [],
        }

    details = []
    for vid in interior:
        kaw_ok, kaw_err = check_kawasaki_at_vertex(vid, graph)
        mae_ok = check_maekawa_at_vertex(vid, graph)
        blb_list = check_blb_at_vertex(vid, graph)

        details.append({
            'vertex_id': vid,
            'kawasaki_ok': kaw_ok,
            'kawasaki_error': kaw_err,
            'maekawa_ok': mae_ok,
            'blb_violations': blb_list,
        })

    total = len(interior)
    return {
        'kawasaki': sum(1 for d in details if d['kawasaki_ok']) / total,
        'maekawa': sum(1 for d in details if d['maekawa_ok']) / total,
        'blb': sum(1 for d in details if not d['blb_violations']) / total,
        'n_interior': total,
        'per_vertex': details,
    }
|
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# OpenEnv deployment manifest for the Optigami space.
spec_version: 1
name: optigami
type: space
runtime: fastapi
# Import path of the ASGI application object (module:attribute).
app: openenv_server.app:app
port: 8000
|
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""OpenEnv integration runtime for Optigami."""
|
| 2 |
+
|
| 3 |
+
from .environment import OpenEnvOrigamiEnvironment
|
| 4 |
+
from .models import OrigamiAction, OrigamiObservation, OrigamiState
|
| 5 |
+
|
| 6 |
+
__all__ = [
|
| 7 |
+
"OpenEnvOrigamiEnvironment",
|
| 8 |
+
"OrigamiAction",
|
| 9 |
+
"OrigamiObservation",
|
| 10 |
+
"OrigamiState",
|
| 11 |
+
]
|
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Any, Optional
|
| 4 |
+
|
| 5 |
+
from openenv.core.env_server.interfaces import Environment
|
| 6 |
+
|
| 7 |
+
from env.environment import OrigamiEnvironment
|
| 8 |
+
|
| 9 |
+
from .models import OrigamiAction, OrigamiObservation, OrigamiState
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class OpenEnvOrigamiEnvironment(Environment[OrigamiAction, OrigamiObservation, OrigamiState]):
    """OpenEnv adapter over the existing OrigamiEnvironment implementation.

    Bridges the project's OrigamiEnvironment to the OpenEnv Environment
    interface: typed reset/step/state/close plus pydantic observation
    payloads (see models.py). The inner environment is created lazily.
    """

    # Each adapter instance owns its own inner environment, so the server
    # can run sessions concurrently.
    SUPPORTS_CONCURRENT_SESSIONS = True

    def __init__(
        self,
        default_mode: str = "step",
        max_steps: int = 8,
        targets_dir: Optional[str] = None,
    ):
        """Store configuration; the inner environment is created by reset()."""
        super().__init__()
        self.default_mode = default_mode
        self.max_steps = max_steps
        self.targets_dir = targets_dir
        # Inner environment and episode id; both None until the first reset().
        self._env: Optional[OrigamiEnvironment] = None
        self._episode_id: Optional[str] = None

    def _new_env(self, mode: Optional[str] = None) -> OrigamiEnvironment:
        # Factory: fresh inner environment configured with this adapter's settings.
        return OrigamiEnvironment(
            mode=mode or self.default_mode,
            max_steps=self.max_steps,
            targets_dir=self.targets_dir,
        )

    def reset(
        self,
        seed: Optional[int] = None,
        episode_id: Optional[str] = None,
        **kwargs: Any,
    ) -> OrigamiObservation:
        """Create a fresh inner environment and return the initial observation.

        Recognized kwargs: 'mode' (overrides default_mode) and 'target_name'
        (forwarded to the inner environment's reset).
        """
        del seed  # deterministic seed plumbing can be added later

        mode = kwargs.get("mode", self.default_mode)
        target_name = kwargs.get("target_name")

        self._env = self._new_env(mode=mode)
        self._episode_id = episode_id
        obs_dict = self._env.reset(target_name=target_name)

        return OrigamiObservation(
            done=False,
            reward=None,
            metadata={"available_targets": self._env.available_targets()},
            prompt=obs_dict.get("prompt", ""),
            target_name=obs_dict.get("target_name"),
            step=obs_dict.get("step", 0),
            paper_state=self._paper_state_snapshot(),
            info=self._env._info(),
            reward_components={},
        )

    def step(
        self,
        action: OrigamiAction,
        timeout_s: Optional[float] = None,
        **kwargs: Any,
    ) -> OrigamiObservation:
        """Execute one action against the inner environment.

        'sequence' mode replays a full completion in a fresh code_as_policy
        environment; otherwise a single fold (structured payload or raw
        completion string) is applied. Exceptions are converted into penalty
        observations rather than raised to the caller.
        """
        del timeout_s, kwargs

        # Lazily initialize if the client stepped before calling reset().
        if self._env is None:
            self.reset(target_name=action.target_name)

        assert self._env is not None

        # Target override: restart the episode on the requested target.
        # NOTE(review): this nested reset() leaves self._episode_id as None —
        # confirm whether the episode id should be preserved across the restart.
        if action.target_name and action.target_name != self._env.target_name:
            self.reset(target_name=action.target_name, mode=self._env.mode)

        try:
            if action.mode == "sequence":
                if not action.completion:
                    return self._error_observation("sequence mode requires completion")

                # Run the whole fold sequence in a fresh code_as_policy env,
                # then adopt that env as the current one.
                seq_env = self._new_env(mode="code_as_policy")
                seq_env.reset(target_name=self._env.target_name)
                obs_dict, reward_dict, done, info = seq_env.step(action.completion)
                self._env = seq_env
            else:
                # Single-fold mode: prefer the structured fold payload and
                # fall back to a raw completion string.
                if action.fold is not None:
                    fold_payload = {
                        "from": list(action.fold.from_point),
                        "to": list(action.fold.to_point),
                        "assignment": action.fold.assignment,
                        "instruction": action.fold.instruction,
                    }
                    env_action: Any = fold_payload
                elif action.completion:
                    env_action = action.completion
                else:
                    return self._error_observation("single mode requires fold or completion")

                obs_dict, reward_dict, done, info = self._env.step(env_action)

            # The scalar reward is the 'total' component when present.
            total = reward_dict.get("total") if isinstance(reward_dict, dict) else None
            return OrigamiObservation(
                done=bool(done),
                reward=float(total) if isinstance(total, (int, float)) else None,
                metadata={"target_name": self._env.target_name},
                prompt=obs_dict.get("prompt", ""),
                target_name=obs_dict.get("target_name", self._env.target_name),
                step=obs_dict.get("step", self._env.step_count),
                paper_state=self._paper_state_snapshot(),
                info=info or {},
                reward_components=reward_dict or {},
            )
        except Exception as exc:  # pragma: no cover - defensive path
            return self._error_observation(str(exc))

    @property
    def state(self) -> OrigamiState:
        """Snapshot of the current episode; defaults before the first reset()."""
        if self._env is None:
            # Not yet reset: report defaults from a throwaway environment
            # (used only to obtain mode and the available target list).
            tmp_env = self._new_env(mode=self.default_mode)
            return OrigamiState(
                episode_id=self._episode_id,
                step_count=0,
                mode=tmp_env.mode,
                target_name=None,
                paper={},
                last_reward={},
                available_targets=tmp_env.available_targets(),
            )

        env_state = self._env.state()
        return OrigamiState(
            episode_id=self._episode_id,
            step_count=env_state.get("step", self._env.step_count),
            mode=env_state.get("mode", self._env.mode),
            target_name=env_state.get("target", self._env.target_name),
            paper=env_state.get("paper", {}),
            last_reward=self._env.last_reward or {},
            available_targets=self._env.available_targets(),
        )

    def close(self) -> None:
        """Release the inner environment, if any."""
        if self._env is not None:
            self._env.close()
            self._env = None

    def _paper_state_snapshot(self) -> dict[str, Any]:
        """JSON-safe snapshot of the paper's vertices, edges, and anchor points."""
        if self._env is None or self._env.paper is None:
            return {"vertices": {}, "edges": [], "anchor_points": []}

        graph = self._env.paper.graph
        return {
            "vertices": {str(k): [float(v[0]), float(v[1])] for k, v in graph.vertices.items()},
            "edges": [
                {
                    "id": int(eid),
                    "v1": [float(graph.vertices[v1][0]), float(graph.vertices[v1][1])],
                    "v2": [float(graph.vertices[v2][0]), float(graph.vertices[v2][1])],
                    "assignment": assignment,
                }
                for eid, (v1, v2, assignment) in graph.edges.items()
            ],
            "anchor_points": [
                [float(x), float(y)] for (x, y) in self._env.paper.anchor_points()
            ],
        }

    def _error_observation(self, message: str) -> OrigamiObservation:
        """Build a non-terminal penalty observation (reward -0.1) carrying *message*."""
        return OrigamiObservation(
            done=False,
            reward=-0.1,
            metadata={"error": True},
            prompt="",
            target_name=self._env.target_name if self._env else None,
            step=self._env.step_count if self._env else 0,
            paper_state=self._paper_state_snapshot(),
            info=self._env._info() if self._env else {},
            reward_components={"format": 0.0, "total": -0.1, "error": message},
            error=message,
        )
|
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Any, Literal, Optional
|
| 4 |
+
|
| 5 |
+
from pydantic import BaseModel, Field, field_validator
|
| 6 |
+
|
| 7 |
+
from openenv.core.env_server.types import Action, Observation, State
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class OrigamiFold(BaseModel):
    """Single fold action payload for step-level execution."""

    from_point: list[float] = Field(..., description="Fold line start [x, y]")
    to_point: list[float] = Field(..., description="Fold line end [x, y]")
    assignment: Literal["M", "V"] = Field(..., description="Mountain or valley")
    instruction: str = Field(default="", description="Optional natural language instruction")

    @field_validator("from_point", "to_point")
    @classmethod
    def _validate_point(cls, point: list[float]) -> list[float]:
        # Enforce exactly two coordinates and coerce each entry to float.
        if len(point) != 2:
            raise ValueError("Point must contain exactly 2 coordinates")
        return [float(point[0]), float(point[1])]
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class OrigamiAction(Action):
    """
    OpenEnv action for Optigami.

    Modes:
    - single: execute one fold (pass `fold` or JSON `completion` for a single-fold object)
    - sequence: execute a full <folds>[...]</folds> completion in one step
    """

    mode: Literal["single", "sequence"] = Field(default="single")
    # Structured single-fold payload; takes precedence over `completion` in single mode.
    fold: Optional[OrigamiFold] = Field(default=None)
    # Raw model completion text; required in sequence mode.
    completion: Optional[str] = Field(default=None)
    target_name: Optional[str] = Field(
        default=None,
        description="Optional target override; reset to this target before stepping",
    )
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class OrigamiObservation(Observation):
    """OpenEnv observation payload returned by Optigami."""

    # Prompt text for the next model turn.
    prompt: str = Field(default="")
    # Name of the active target pattern, when one is loaded.
    target_name: Optional[str] = Field(default=None)
    # Step index within the current episode.
    step: int = Field(default=0)
    # JSON-safe snapshot of the paper: vertices, edges, anchor points.
    paper_state: dict[str, Any] = Field(default_factory=dict)
    # Extra diagnostic info from the inner environment.
    info: dict[str, Any] = Field(default_factory=dict)
    # Per-component reward breakdown (may include an 'error' string entry).
    reward_components: dict[str, float | int | str] = Field(default_factory=dict)
    # Error message when the step failed; None on success.
    error: Optional[str] = Field(default=None)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class OrigamiState(State):
    """OpenEnv state payload for Optigami."""

    # Environment mode (e.g. "step"); mirrors the inner environment's mode.
    mode: str = Field(default="step")
    # Name of the active target pattern, when one is loaded.
    target_name: Optional[str] = Field(default=None)
    # Paper snapshot as reported by the inner environment's state().
    paper: dict[str, Any] = Field(default_factory=dict)
    # Most recent reward breakdown, if any.
    last_reward: dict[str, Any] = Field(default_factory=dict)
    # Names of all loadable target patterns.
    available_targets: list[str] = Field(default_factory=list)
|
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""OpenEnv FastAPI app package."""
|
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from openenv.core.env_server.http_server import create_app
|
| 4 |
+
|
| 5 |
+
from openenv_runtime.environment import OpenEnvOrigamiEnvironment
|
| 6 |
+
from openenv_runtime.models import OrigamiAction, OrigamiObservation
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# Module-level ASGI application consumed by the runtime (see openenv.yaml's
# `app: openenv_server.app:app`). The environment is passed as a factory
# lambda — presumably so create_app can construct instances on demand;
# confirm against the openenv create_app documentation.
app = create_app(
    env=lambda: OpenEnvOrigamiEnvironment(),
    action_cls=OrigamiAction,
    observation_cls=OrigamiObservation,
    env_name="optigami",
)
|
|
@@ -0,0 +1,485 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Optigami — Implementation Plan
|
| 2 |
+
|
| 3 |
+
> Derived from handoff doc critique, origami math/physics research, and plan review.
|
| 4 |
+
> Last updated: 2026-03-07
|
| 5 |
+
|
| 6 |
+
---
|
| 7 |
+
|
| 8 |
+
## Resolved Architectural Decisions
|
| 9 |
+
|
| 10 |
+
### 1. Code-as-policy for training, step-level for demo
|
| 11 |
+
|
| 12 |
+
GRPO samples N completions for a fixed prompt, evaluates each independently, computes group advantages. That maps cleanly to **code-as-policy**: the model outputs a complete fold sequence as a JSON list, the environment executes it sequentially, terminal reward is computed once.
|
| 13 |
+
|
| 14 |
+
Step-level breaks GRPO's assumption: at step k, the prompt is conditioned on prior steps which differ across rollouts, so you're no longer comparing N completions to the same situation.
|
| 15 |
+
|
| 16 |
+
**Resolution:** Training is code-as-policy (full sequence → single reward). Demo is step-by-step (one fold at a time with live feedback). Same environment, different prompt wrapper. Same model at inference — you just prompt it one fold at a time for the demo.
|
| 17 |
+
|
| 18 |
+
### 2. 2D crease pattern is Phase 1, engineering metrics are Phase 2
|
| 19 |
+
|
| 20 |
+
**Phase 1 (hackathon MVP):** Build the crease pattern graph, check local foldability, use geometric coverage as progress proxy. Self-contained, can show reward improvement.
|
| 21 |
+
|
| 22 |
+
**Phase 2 (if time permits):** Apply fold angles to compute the 3D folded state, compute deployment ratio and bounding box. These become the primary reward, with crease coverage as scaffolding. This is where the "model discovers Miura-ori" story lives.
|
| 23 |
+
|
| 24 |
+
If the deadline forces a cut, Phase 1 ships and Phase 2 is explicitly called out as the next step.
|
| 25 |
+
|
| 26 |
+
### 3. Scope to local flat-foldability (NP-hardness acknowledged)
|
| 27 |
+
|
| 28 |
+
Global flat-foldability (layer ordering) is NP-complete (Bern-Hayes 1996). We target **local flat-foldability** at each vertex, which is polynomial. This is a feature, not a limitation — the pitch: "our rewards check the conditions every origami designer verifies. Global layer ordering is provably NP-complete."
|
| 29 |
+
|
| 30 |
+
### 4. Symmetry masking is a noted risk
|
| 31 |
+
|
| 32 |
+
For Level 1-2 targets the anchor set is small (≤8 points), manageable. For Level 3+, intersection vertices accumulate to 15-20+ points, giving O(300+) candidate fold lines. The unit square has dihedral-4 symmetry (4 rotations + 4 reflections). For Level 3+, if training shows no convergence after 500 steps, add explicit symmetry-based action pruning.
|
| 33 |
+
|
| 34 |
+
---
|
| 35 |
+
|
| 36 |
+
## File Structure
|
| 37 |
+
|
| 38 |
+
```
|
| 39 |
+
optigami/
|
| 40 |
+
env/
|
| 41 |
+
__init__.py
|
| 42 |
+
graph.py # CreaseGraph: vertices, edges, cyclic ordering
|
| 43 |
+
paper_state.py # PaperState using CreaseGraph, add_crease
|
| 44 |
+
verifier.py # Kawasaki, Maekawa, BLB, coverage, deployment ratio
|
| 45 |
+
rewards.py # compute_reward (Phase 1 + Phase 2 extension)
|
| 46 |
+
environment.py # OpenEnv wrapper, code-as-policy and step modes
|
| 47 |
+
prompts.py # LLM observation formatting
|
| 48 |
+
fold_engine.py # Phase 2: apply fold angles, compute 3D bounding box
|
| 49 |
+
targets/
|
| 50 |
+
validator.py # crimp-check all .fold files before training
|
| 51 |
+
half_horizontal.fold
|
| 52 |
+
half_vertical.fold
|
| 53 |
+
diagonal.fold
|
| 54 |
+
cross_fold.fold
|
| 55 |
+
x_fold.fold
|
| 56 |
+
pinwheel_base.fold
|
| 57 |
+
preliminary_base.fold
|
| 58 |
+
fish_base.fold
|
| 59 |
+
train.py
|
| 60 |
+
requirements.txt
|
| 61 |
+
src/ # React demo visualizer (existing)
|
| 62 |
+
plans/
|
| 63 |
+
implementation_plan.md
|
| 64 |
+
```
|
| 65 |
+
|
| 66 |
+
---
|
| 67 |
+
|
| 68 |
+
## Phase 1: CreaseGraph (`env/graph.py`)
|
| 69 |
+
|
| 70 |
+
Everything builds on this. Get it right first.
|
| 71 |
+
|
| 72 |
+
**Data:**
|
| 73 |
+
- `vertices`: `dict[vertex_id → (x, y)]`
|
| 74 |
+
- `edges`: `dict[edge_id → (v1, v2, assignment)]` where assignment ∈ `{M, V, B}`
|
| 75 |
+
- `vertex_edges`: `dict[vertex_id → [edge_ids]]`
|
| 76 |
+
|
| 77 |
+
**Key operations:**
|
| 78 |
+
- `add_vertex(x, y, tol=1e-9)` — deduplicated by proximity
|
| 79 |
+
- `add_edge(v1, v2, assignment)` — no duplicates
|
| 80 |
+
- `get_cyclic_edges(vertex_id)` — incident edge IDs sorted by angle of the other endpoint around the vertex (the cyclic order Kawasaki requires)
|
| 81 |
+
- `interior_vertices()` — vertices not on the unit square boundary
|
| 82 |
+
- `split_edge(edge_id, new_vertex_id)` — splits an edge at a vertex, used when a new crease intersects an existing one
|
| 83 |
+
|
| 84 |
+
**`add_crease(p1, p2, assignment)` in `PaperState`:**
|
| 85 |
+
1. Validate both endpoints are in the anchor set (within tolerance)
|
| 86 |
+
2. Find all intersections with existing edges
|
| 87 |
+
3. Add intersection vertices and split existing edges at them
|
| 88 |
+
4. Add the new crease edge(s) (possibly split by intersections)
|
| 89 |
+
5. Return `{valid, anchored, new_vertices, errors}`
|
| 90 |
+
|
| 91 |
+
**Anchor point set** (grows as creases are added):
|
| 92 |
+
- Boundary corners: `(0,0), (1,0), (1,1), (0,1)`
|
| 93 |
+
- Boundary midpoints of any existing boundary edge
|
| 94 |
+
- All crease-crease intersection vertices
|
| 95 |
+
- Midpoints of existing crease edges
|
| 96 |
+
|
| 97 |
+
---
|
| 98 |
+
|
| 99 |
+
## Phase 2: Verifiers (`env/verifier.py`)
|
| 100 |
+
|
| 101 |
+
### Even-degree fast-fail
|
| 102 |
+
|
| 103 |
+
```python
|
| 104 |
+
def has_even_degree(vertex_id, graph) -> bool:
|
| 105 |
+
return len(graph.get_cyclic_edges(vertex_id)) % 2 == 0
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
Runs before Kawasaki. Odd-degree interior vertices are impossible — short-circuit immediately.
|
| 109 |
+
|
| 110 |
+
### Kawasaki-Justin
|
| 111 |
+
|
| 112 |
+
Sector angles must be computed in **cyclic angular order** around each vertex — not by magnitude, not arbitrarily. The handoff's sorted-angle approach was wrong; cyclic order is recovered by sorting incident edge directions by `arctan2`.
|
| 113 |
+
|
| 114 |
+
```python
|
| 115 |
+
def check_kawasaki_at_vertex(vertex_id, graph) -> tuple[bool, float]:
|
| 116 |
+
cyclic_edges = graph.get_cyclic_edges(vertex_id) # sorted by angle
|
| 117 |
+
n = len(cyclic_edges)
|
| 118 |
+
if n % 2 != 0:
|
| 119 |
+
return False, float('inf')
|
| 120 |
+
if n < 4:
|
| 121 |
+
return True, 0.0 # boundary vertex, not an interior fold vertex
|
| 122 |
+
|
| 123 |
+
v = graph.vertices[vertex_id]
|
| 124 |
+
angles = []
|
| 125 |
+
for eid in cyclic_edges:
|
| 126 |
+
v1, v2, _ = graph.edges[eid]
|
| 127 |
+
other = v2 if v1 == vertex_id else v1
|
| 128 |
+
other_pos = graph.vertices[other]
|
| 129 |
+
angles.append(np.arctan2(other_pos[1] - v[1], other_pos[0] - v[0]))
|
| 130 |
+
# angles is already in cyclic order (cyclic_edges sorted by angle)
|
| 131 |
+
|
| 132 |
+
sectors = []
|
| 133 |
+
for i in range(n):
|
| 134 |
+
diff = angles[(i+1) % n] - angles[i]
|
| 135 |
+
if diff < 0:
|
| 136 |
+
diff += 2 * np.pi
|
| 137 |
+
sectors.append(diff)
|
| 138 |
+
|
| 139 |
+
alt_sum = sum(s * ((-1)**i) for i, s in enumerate(sectors))
|
| 140 |
+
return abs(alt_sum) < 1e-9, abs(alt_sum)
|
| 141 |
+
```
|
| 142 |
+
|
| 143 |
+
### Maekawa-Justin
|
| 144 |
+
|
| 145 |
+
Boundary edges (`B`) must not be counted — only fold edges (`M`, `V`). The handoff counted boundary edges, which breaks Maekawa for any crease touching the paper edge.
|
| 146 |
+
|
| 147 |
+
```python
|
| 148 |
+
def check_maekawa_at_vertex(vertex_id, graph) -> bool:
|
| 149 |
+
fold_edges = [eid for eid in graph.vertex_edges[vertex_id]
|
| 150 |
+
if graph.edges[eid][2] in ('M', 'V')]
|
| 151 |
+
if len(fold_edges) < 4:
|
| 152 |
+
return True # not an interior fold vertex yet
|
| 153 |
+
M = sum(1 for eid in fold_edges if graph.edges[eid][2] == 'M')
|
| 154 |
+
V = len(fold_edges) - M
|
| 155 |
+
return abs(M - V) == 2
|
| 156 |
+
```
|
| 157 |
+
|
| 158 |
+
### Big-Little-Big (BLB)
|
| 159 |
+
|
| 160 |
+
At any interior vertex, if a sector angle is a strict local minimum, the two crease lines bounding that sector must have **opposite MV parity**. This is the key pruning rule between Maekawa and layer-ordering — a pattern can satisfy Maekawa while violating BLB, meaning no valid layer ordering exists.
|
| 161 |
+
|
| 162 |
+
```python
|
| 163 |
+
def check_blb_at_vertex(vertex_id, graph) -> list[tuple]:
|
| 164 |
+
"""Returns list of (edge_a, edge_b) pairs where BLB is violated."""
|
| 165 |
+
cyclic_edges = graph.get_cyclic_edges(vertex_id)
|
| 166 |
+
n = len(cyclic_edges)
|
| 167 |
+
if n < 4:
|
| 168 |
+
return []
|
| 169 |
+
sectors = _compute_sectors(vertex_id, cyclic_edges, graph)
|
| 170 |
+
violations = []
|
| 171 |
+
for i in range(n):
|
| 172 |
+
prev_s = sectors[(i-1) % n]
|
| 173 |
+
next_s = sectors[(i+1) % n]
|
| 174 |
+
if sectors[i] < prev_s and sectors[i] < next_s: # strict local min
|
| 175 |
+
left_eid = cyclic_edges[i]
|
| 176 |
+
right_eid = cyclic_edges[(i+1) % n]
|
| 177 |
+
a_left = graph.edges[left_eid][2]
|
| 178 |
+
a_right = graph.edges[right_eid][2]
|
| 179 |
+
if a_left in ('M', 'V') and a_right in ('M', 'V') and a_left == a_right:
|
| 180 |
+
violations.append((left_eid, right_eid))
|
| 181 |
+
return violations
|
| 182 |
+
```
|
| 183 |
+
|
| 184 |
+
### Geometric Coverage (with excess penalty)
|
| 185 |
+
|
| 186 |
+
One-sided coverage alone rewards placing target creases but doesn't penalize surplus creases. Both are returned separately so the reward function can weight them independently.
|
| 187 |
+
|
| 188 |
+
```python
|
| 189 |
+
def geometric_coverage(state, target_edges, tol_pos=0.05, tol_angle=5.0) -> tuple[float, float]:
|
| 190 |
+
"""
|
| 191 |
+
Returns (coverage, economy).
|
| 192 |
+
coverage: fraction of target creases matched by current creases [0, 1]
|
| 193 |
+
economy: penalty for excess creases [0, 1], 1.0 = no excess
|
| 194 |
+
"""
|
| 195 |
+
matched = 0
|
| 196 |
+
for t_edge in target_edges:
|
| 197 |
+
for c_edge in state.crease_edges():
|
| 198 |
+
if _edges_match(t_edge, c_edge, tol_pos, tol_angle):
|
| 199 |
+
matched += 1
|
| 200 |
+
break
|
| 201 |
+
n_target = max(len(target_edges), 1)
|
| 202 |
+
n_current = len(state.crease_edges())
|
| 203 |
+
coverage = matched / n_target
|
| 204 |
+
economy = max(0.0, 1.0 - max(0, n_current - n_target) / n_target)
|
| 205 |
+
return coverage, economy
|
| 206 |
+
```
|
| 207 |
+
|
| 208 |
+
---
|
| 209 |
+
|
| 210 |
+
## Phase 3: Reward Function (`env/rewards.py`)
|
| 211 |
+
|
| 212 |
+
### Phase 1 reward
|
| 213 |
+
|
| 214 |
+
Single consistent definition. `progress` carries 45% — it's the only signal with real geometric content at every step. Validity signals split 20% total. Economy penalizes excess creases.
|
| 215 |
+
|
| 216 |
+
```python
|
| 217 |
+
def compute_reward_phase1(state, action_result, target) -> dict:
|
| 218 |
+
r = {}
|
| 219 |
+
|
| 220 |
+
r['format'] = 1.0 if action_result['valid'] else 0.0
|
| 221 |
+
if not r['format']:
|
| 222 |
+
return {**r, 'total': -0.1}
|
| 223 |
+
|
| 224 |
+
r['anchored'] = 1.0 if action_result['anchored'] else 0.3
|
| 225 |
+
|
| 226 |
+
interior = state.graph.interior_vertices()
|
| 227 |
+
n = max(len(interior), 1)
|
| 228 |
+
|
| 229 |
+
kaw = [check_kawasaki_at_vertex(v, state.graph) for v in interior]
|
| 230 |
+
mae = [check_maekawa_at_vertex(v, state.graph) for v in interior]
|
| 231 |
+
blb = [check_blb_at_vertex(v, state.graph) for v in interior]
|
| 232 |
+
|
| 233 |
+
r['kawasaki'] = sum(ok for ok, _ in kaw) / n
|
| 234 |
+
r['maekawa'] = sum(mae) / n
|
| 235 |
+
r['blb'] = 1.0 - sum(len(v) > 0 for v in blb) / n
|
| 236 |
+
|
| 237 |
+
coverage, economy = geometric_coverage(state, target['edges'])
|
| 238 |
+
r['progress'] = coverage
|
| 239 |
+
r['economy'] = economy
|
| 240 |
+
|
| 241 |
+
all_valid = (r['kawasaki'] == 1.0 and r['maekawa'] == 1.0 and r['blb'] == 1.0)
|
| 242 |
+
r['completion'] = 10.0 if (r['progress'] > 0.9 and all_valid) else 0.0
|
| 243 |
+
r['efficiency'] = -0.01
|
| 244 |
+
|
| 245 |
+
r['total'] = (
|
| 246 |
+
0.05 * r['anchored'] +
|
| 247 |
+
0.08 * r['kawasaki'] +
|
| 248 |
+
0.07 * r['maekawa'] +
|
| 249 |
+
0.05 * r['blb'] +
|
| 250 |
+
0.45 * r['progress'] +
|
| 251 |
+
0.10 * r['economy'] +
|
| 252 |
+
r['completion'] +
|
| 253 |
+
r['efficiency']
|
| 254 |
+
)
|
| 255 |
+
return r
|
| 256 |
+
```
|
| 257 |
+
|
| 258 |
+
### Phase 2 reward extension
|
| 259 |
+
|
| 260 |
+
When `fold_engine.py` is available, replace `progress` and `economy` with engineering metrics. No pre-specified target pattern required — the model optimizes objectives directly and can discover that Miura-ori is optimal.
|
| 261 |
+
|
| 262 |
+
```python
|
| 263 |
+
def compute_reward_phase2(state, action_result, folded_state) -> dict:
|
| 264 |
+
# ... same gates as phase 1 ...
|
| 265 |
+
|
| 266 |
+
r['deployment_ratio'] = compute_deployment_ratio(folded_state)
|
| 267 |
+
# = unfolded_area / folded_bounding_box_area
|
| 268 |
+
|
| 269 |
+
r['bbox_compactness'] = 1.0 - (folded_bbox_area / unfolded_area)
|
| 270 |
+
# higher = more compact fold
|
| 271 |
+
|
| 272 |
+
r['total'] = (
|
| 273 |
+
0.05 * r['anchored'] +
|
| 274 |
+
0.08 * r['kawasaki'] +
|
| 275 |
+
0.07 * r['maekawa'] +
|
| 276 |
+
0.05 * r['blb'] +
|
| 277 |
+
0.30 * r['deployment_ratio'] +
|
| 278 |
+
0.20 * r['bbox_compactness'] +
|
| 279 |
+
0.05 * r['economy'] +
|
| 280 |
+
r['completion'] +
|
| 281 |
+
r['efficiency']
|
| 282 |
+
)
|
| 283 |
+
return r
|
| 284 |
+
```
|
| 285 |
+
|
| 286 |
+
---
|
| 287 |
+
|
| 288 |
+
## Phase 4: Prompts (`env/prompts.py`)
|
| 289 |
+
|
| 290 |
+
### Code-as-policy prompt (training mode)
|
| 291 |
+
|
| 292 |
+
```
|
| 293 |
+
You are an origami designer. Generate a complete fold sequence for a unit square [0,1]x[0,1].
|
| 294 |
+
|
| 295 |
+
TARGET CREASE PATTERN:
|
| 296 |
+
Valley fold: (0.0, 0.5) -> (1.0, 0.5)
|
| 297 |
+
Mountain fold: (0.5, 0.0) -> (0.5, 1.0)
|
| 298 |
+
|
| 299 |
+
RULES (your sequence must satisfy at every interior vertex):
|
| 300 |
+
- Kawasaki: alternating sector angles sum equally (each half = 180 degrees)
|
| 301 |
+
- Maekawa: |mountain_count - valley_count| = 2
|
| 302 |
+
- Big-Little-Big: folds bounding the smallest sector must have opposite types
|
| 303 |
+
|
| 304 |
+
ANCHOR POINTS (valid fold endpoints):
|
| 305 |
+
Corners: (0,0) (1,0) (1,1) (0,1)
|
| 306 |
+
Midpoints: (0.5,0) (1,0.5) (0.5,1) (0,0.5)
|
| 307 |
+
Note: the square has 4-fold dihedral symmetry — symmetric fold sequences are equivalent.
|
| 308 |
+
|
| 309 |
+
Output a JSON list of fold operations in order. Both endpoints must be anchor points.
|
| 310 |
+
|
| 311 |
+
<folds>
|
| 312 |
+
[
|
| 313 |
+
{"instruction": "...", "from": [x1, y1], "to": [x2, y2], "assignment": "M"|"V"},
|
| 314 |
+
...
|
| 315 |
+
]
|
| 316 |
+
</folds>
|
| 317 |
+
```
|
| 318 |
+
|
| 319 |
+
### Step-level prompt (demo mode)
|
| 320 |
+
|
| 321 |
+
Same information, but shows only the current step's observation with prior fold history and last-step reward appended. Same model, different prompt wrapper.
|
| 322 |
+
|
| 323 |
+
```
|
| 324 |
+
... [same header] ...
|
| 325 |
+
|
| 326 |
+
CURRENT STATE (step 2 of 8):
|
| 327 |
+
Creases placed:
|
| 328 |
+
1. Mountain fold: (0.5, 0.0) -> (0.5, 1.0)
|
| 329 |
+
|
| 330 |
+
AVAILABLE ANCHOR POINTS:
|
| 331 |
+
Corners: (0.0,0.0) (1.0,0.0) (1.0,1.0) (0.0,1.0)
|
| 332 |
+
Edge midpoints: (0.5,0.0) (1.0,0.5) (0.5,1.0) (0.0,0.5)
|
| 333 |
+
Intersections: (0.5,0.5)
|
| 334 |
+
|
| 335 |
+
LAST REWARD: format=1.0 kawasaki=1.0 maekawa=1.0 blb=1.0 progress=0.32 total=0.33
|
| 336 |
+
|
| 337 |
+
Add the next crease. Output JSON only:
|
| 338 |
+
{"instruction": "...", "from": [x1, y1], "to": [x2, y2], "assignment": "M"|"V"}
|
| 339 |
+
```
|
| 340 |
+
|
| 341 |
+
---
|
| 342 |
+
|
| 343 |
+
## Phase 5: Target Files + Validator (`env/targets/`)
|
| 344 |
+
|
| 345 |
+
Targets are hand-authored `.fold` JSON. Before any target enters training, `validator.py` runs:
|
| 346 |
+
|
| 347 |
+
1. Parse FOLD JSON, reconstruct the CreaseGraph
|
| 348 |
+
2. For each interior vertex: even-degree → Kawasaki → Maekawa → BLB
|
| 349 |
+
3. Enumerate at least one valid MV assignment via the crimp algorithm
|
| 350 |
+
4. Fail loudly with vertex + violation details if any check fails
|
| 351 |
+
|
| 352 |
+
**Target set:**
|
| 353 |
+
|
| 354 |
+
| File | Creases | Level | Interior vertices |
|
| 355 |
+
|------|---------|-------|-------------------|
|
| 356 |
+
| `half_horizontal.fold` | 1 | 1 | 0 |
|
| 357 |
+
| `half_vertical.fold` | 1 | 1 | 0 |
|
| 358 |
+
| `diagonal.fold` | 1 | 1 | 0 |
|
| 359 |
+
| `cross_fold.fold` | 2 | 2 | 1 (degree 4) |
|
| 360 |
+
| `x_fold.fold` | 2 | 2 | 1 (degree 4) |
|
| 361 |
+
| `pinwheel_base.fold` | 4 | 2 | 4 |
|
| 362 |
+
| `preliminary_base.fold` | 4 | 3 | 4 |
|
| 363 |
+
| `fish_base.fold` | 6 | 3 | 6 |
|
| 364 |
+
|
| 365 |
+
Level 1 targets have zero interior vertices — Kawasaki/Maekawa are vacuously satisfied, the only reward signal is `progress`. The model learns to place geometrically correct folds before worrying about vertex constraints.
|
| 366 |
+
|
| 367 |
+
---
|
| 368 |
+
|
| 369 |
+
## Phase 6: OpenEnv Wrapper (`env/environment.py`)
|
| 370 |
+
|
| 371 |
+
Both modes supported. The `info` dict explicitly labels what is and isn't checked.
|
| 372 |
+
|
| 373 |
+
```python
|
| 374 |
+
class OrigamiEnvironment(Environment):
|
| 375 |
+
|
| 376 |
+
async def step(self, action):
|
| 377 |
+
if isinstance(action, list):
|
| 378 |
+
return self._execute_sequence(action) # code-as-policy
|
| 379 |
+
else:
|
| 380 |
+
return self._execute_single(action) # step mode
|
| 381 |
+
|
| 382 |
+
def _execute_sequence(self, folds):
|
| 383 |
+
for fold in folds:
|
| 384 |
+
result = self.paper.add_crease(
|
| 385 |
+
fold['from'], fold['to'], fold['assignment']
|
| 386 |
+
)
|
| 387 |
+
if not result['valid']:
|
| 388 |
+
break # partial credit: reward up to failure point
|
| 389 |
+
reward = compute_reward_phase1(self.paper, result, self.target)
|
| 390 |
+
return self._get_observation(), reward, True, self._info()
|
| 391 |
+
|
| 392 |
+
def _info(self):
|
| 393 |
+
interior = self.paper.graph.interior_vertices()
|
| 394 |
+
return {
|
| 395 |
+
'local_foldability': all(
|
| 396 |
+
check_kawasaki_at_vertex(v, self.paper.graph)[0] and
|
| 397 |
+
check_maekawa_at_vertex(v, self.paper.graph)
|
| 398 |
+
for v in interior
|
| 399 |
+
),
|
| 400 |
+
'blb_satisfied': all(
|
| 401 |
+
len(check_blb_at_vertex(v, self.paper.graph)) == 0
|
| 402 |
+
for v in interior
|
| 403 |
+
),
|
| 404 |
+
'global_foldability': 'not_checked', # NP-complete (Bern-Hayes 1996)
|
| 405 |
+
'n_interior_vertices': len(interior),
|
| 406 |
+
}
|
| 407 |
+
```
|
| 408 |
+
|
| 409 |
+
---
|
| 410 |
+
|
| 411 |
+
## Phase 7: Training Script (`train.py`)
|
| 412 |
+
|
| 413 |
+
Code-as-policy GRPO. Each completion is a complete fold sequence. N=8 completions per prompt evaluated in parallel, each with its own fresh `PaperState`. Terminal reward only.
|
| 414 |
+
|
| 415 |
+
```python
|
| 416 |
+
def origami_reward_fn(completions, prompts, targets):
|
| 417 |
+
rewards = []
|
| 418 |
+
for completion, target in zip(completions, targets):
|
| 419 |
+
try:
|
| 420 |
+
folds = parse_fold_list(completion) # extract JSON from <folds> tags
|
| 421 |
+
paper = PaperState()
|
| 422 |
+
for fold in folds:
|
| 423 |
+
paper.add_crease(fold['from'], fold['to'], fold['assignment'])
|
| 424 |
+
r = compute_reward_phase1(paper, {'valid': True, 'anchored': True}, target)
|
| 425 |
+
rewards.append(r['total'])
|
| 426 |
+
except Exception:
|
| 427 |
+
rewards.append(-0.1)
|
| 428 |
+
return rewards
|
| 429 |
+
```
|
| 430 |
+
|
| 431 |
+
Log all reward components separately (kawasaki, maekawa, blb, progress, economy) — the decomposed curves are the demo artifact showing the model learning to satisfy geometric constraints.
|
| 432 |
+
|
| 433 |
+
---
|
| 434 |
+
|
| 435 |
+
## Phase 8: Fold Engine / Phase 2 (`env/fold_engine.py`)
|
| 436 |
+
|
| 437 |
+
For flat-folded patterns (all creases at 180°), the folded bounding box is computable from crease pattern + simplified layer assignment. For Level 1-3 targets the layer assignment is tractable (polynomial for single-vertex, and our simple patterns have at most a few interior vertices).
|
| 438 |
+
|
| 439 |
+
Apply fold angles via reflection transforms, project to get 2D bounding box of the folded state, compute:
|
| 440 |
+
|
| 441 |
+
```
|
| 442 |
+
deployment_ratio = unfolded_area / folded_bbox_area
|
| 443 |
+
```
|
| 444 |
+
|
| 445 |
+
Higher = more compact = better engineering. With this signal the model can discover optimal fold patterns (Miura-ori, accordion folds) without a pre-specified target.
|
| 446 |
+
|
| 447 |
+
---
|
| 448 |
+
|
| 449 |
+
## Build Order
|
| 450 |
+
|
| 451 |
+
```
|
| 452 |
+
[ ] 1. requirements.txt (shapely, numpy, pytest)
|
| 453 |
+
[ ] 2. env/graph.py — CreaseGraph with cyclic ordering, split_edge
|
| 454 |
+
[ ] 3. Unit test: two crossing creases -> 1 interior vertex of degree 4, correct cyclic order
|
| 455 |
+
[ ] 4. env/paper_state.py — PaperState.add_crease with intersection handling
|
| 456 |
+
[ ] 5. env/verifier.py — even-degree, Kawasaki, Maekawa, BLB, geometric_coverage
|
| 457 |
+
[ ] 6. Unit test: degree-4 vertex with known valid/invalid angles -> Kawasaki pass/fail
|
| 458 |
+
[ ] 7. Unit test: single crease -> zero interior vertices -> verifiers return defaults (True)
|
| 459 |
+
[ ] 8. Unit test: excess crease penalty activates correctly
|
| 460 |
+
[ ] 9. targets/validator.py — crimp-check routine
|
| 461 |
+
[ ] 10. env/targets/*.fold — 4 Level 1 + 4 Level 2 targets, all passing validator
|
| 462 |
+
[ ] 11. env/rewards.py — Phase 1 compute_reward
|
| 463 |
+
[ ] 12. env/prompts.py — code-as-policy prompt + step-level prompt
|
| 464 |
+
[ ] 13. env/environment.py — both sequence and step modes + info dict
|
| 465 |
+
[ ] 14. Integration test: known valid sequence on half_horizontal, reward >= 0.9
|
| 466 |
+
[ ] 15. Integration test: invalid MV assignment on cross_fold, BLB fires
|
| 467 |
+
[ ] 16. train.py — GRPO with code-as-policy reward fn
|
| 468 |
+
[ ] 17. First training run on Level 1 targets, log all reward components to W&B
|
| 469 |
+
[ ] 18. env/fold_engine.py — Phase 2: fold angles -> 3D state -> deployment ratio
|
| 470 |
+
[ ] 19. Visualizer (React): render crease graph from FOLD JSON, animate fold history
|
| 471 |
+
```
|
| 472 |
+
|
| 473 |
+
Steps 2-3 and 5-8 are highest risk. Get the graph data structure and cyclic Kawasaki check correct before building anything on top of them. Steps 14-15 are the checkpoint before touching the training script.
|
| 474 |
+
|
| 475 |
+
---
|
| 476 |
+
|
| 477 |
+
## Key Risks
|
| 478 |
+
|
| 479 |
+
| Risk | Likelihood | Mitigation |
|
| 480 |
+
|------|-----------|------------|
|
| 481 |
+
| Cyclic sector angle computation incorrect | High | Explicit unit tests with known valid/invalid patterns |
|
| 482 |
+
| Level 3+ action space too large to learn | Medium | Dihedral symmetry hints in prompt; hard masking if no convergence after 500 steps |
|
| 483 |
+
| GRPO reward signal too sparse (no interior vertices on Level 1) | Medium | Level 1 reward is purely `progress`; works without vertex constraints |
|
| 484 |
+
| fold_engine Phase 2 infeasible in hackathon time | Medium | Phase 1 ships independently; Phase 2 is an extension |
|
| 485 |
+
| Layer ordering required for deployment ratio on complex patterns | Low | Level 1-3 patterns are tractable; flag NP-hardness in info dict |
|
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[build-system]
|
| 2 |
+
requires = ["hatchling>=1.25.0"]
|
| 3 |
+
build-backend = "hatchling.build"
|
| 4 |
+
|
| 5 |
+
[project]
|
| 6 |
+
name = "optigami"
|
| 7 |
+
version = "0.1.0"
|
| 8 |
+
description = "Optigami OpenEnv origami environment"
|
| 9 |
+
readme = "README.md"
|
| 10 |
+
requires-python = ">=3.10"
|
| 11 |
+
dependencies = [
|
| 12 |
+
"fastapi>=0.100.0",
|
| 13 |
+
"numpy>=1.24.0",
|
| 14 |
+
"openenv-core[core]>=0.2.1",
|
| 15 |
+
"pydantic>=2.0.0",
|
| 16 |
+
"shapely>=2.0.0",
|
| 17 |
+
]
|
| 18 |
+
|
| 19 |
+
[tool.pytest.ini_options]
|
| 20 |
+
pythonpath = ["."]
|
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
shapely>=2.0.0
|
| 2 |
+
numpy>=1.24.0
|
| 3 |
+
scipy>=1.10.0
|
| 4 |
+
matplotlib>=3.7.0
|
| 5 |
+
pytest>=7.0.0
|
| 6 |
+
fastapi>=0.100.0
|
| 7 |
+
uvicorn>=0.23.0
|
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
FastAPI server for the origami RL environment.
|
| 3 |
+
Serves episode data to the React frontend.
|
| 4 |
+
|
| 5 |
+
Usage: uvicorn server:app --reload --port 8000
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
try:
|
| 9 |
+
from fastapi import FastAPI
|
| 10 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 11 |
+
from pydantic import BaseModel
|
| 12 |
+
except ImportError:
|
| 13 |
+
print("Run: pip install fastapi uvicorn pydantic")
|
| 14 |
+
raise
|
| 15 |
+
|
| 16 |
+
from typing import Optional
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# Application instance served by uvicorn (see module docstring for usage).
app = FastAPI(title="OrigamiRL API")

# The React dev server runs on a different origin (localhost:3000), so the
# browser needs CORS headers to call this API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # localhost:3000 for React dev
    allow_methods=["*"],
    allow_headers=["*"],
)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class FoldAction(BaseModel):
    """A single crease placed on the paper, as serialized for the frontend."""

    from_point: list[float]  # [x, y] start anchor of the crease
    to_point: list[float]    # [x, y] end anchor of the crease
    assignment: str          # 'M' (mountain) or 'V' (valley)
    instruction: str = ""    # free-text description of the fold
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class EpisodeStep(BaseModel):
    """One environment step: the fold taken plus the resulting paper state."""

    step: int                         # 1-based step index
    fold: Optional[FoldAction]        # fold applied at this step
    paper_state: dict                 # FOLD JSON of current crease graph
    anchor_points: list[list[float]]  # valid fold endpoints after this step
    reward: dict                      # decomposed reward components
    done: bool                        # episode finished after this step
    info: dict                        # verifier diagnostics
    prompt: str                       # LLM prompt at this step
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class EpisodeResult(BaseModel):
    """Complete rollout for one target, as returned by the episode endpoints."""

    target_name: str
    target: dict              # FOLD JSON of target
    steps: list[EpisodeStep]  # every executed step, in order
    final_reward: dict        # reward dict of the last executed step
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@app.get("/")
|
| 55 |
+
def health_check():
|
| 56 |
+
"""Health check — returns status and available target names."""
|
| 57 |
+
from env.environment import OrigamiEnvironment
|
| 58 |
+
env = OrigamiEnvironment()
|
| 59 |
+
return {"status": "ok", "targets": env.available_targets()}
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@app.get("/targets")
|
| 63 |
+
def get_targets():
|
| 64 |
+
"""Return list of available target names and their metadata."""
|
| 65 |
+
from env.environment import OrigamiEnvironment
|
| 66 |
+
env = OrigamiEnvironment()
|
| 67 |
+
targets = {}
|
| 68 |
+
for name in env.available_targets():
|
| 69 |
+
t = env._targets[name]
|
| 70 |
+
targets[name] = {
|
| 71 |
+
"name": name,
|
| 72 |
+
"level": t.get("level", 1),
|
| 73 |
+
"description": t.get("description", ""),
|
| 74 |
+
"n_creases": sum(1 for a in t["edges_assignment"] if a in ("M", "V")),
|
| 75 |
+
}
|
| 76 |
+
return targets
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
@app.get("/episode/run")
|
| 80 |
+
def run_episode(target: str = "half_horizontal", completion: str = ""):
|
| 81 |
+
"""
|
| 82 |
+
Run a code-as-policy episode with a provided completion string.
|
| 83 |
+
|
| 84 |
+
If completion is empty, returns the prompt so the caller knows what to send.
|
| 85 |
+
Returns full episode result with all steps.
|
| 86 |
+
"""
|
| 87 |
+
from env.environment import OrigamiEnvironment
|
| 88 |
+
from env.prompts import parse_fold_list, code_as_policy_prompt
|
| 89 |
+
from env.rewards import compute_reward, target_crease_edges
|
| 90 |
+
|
| 91 |
+
env = OrigamiEnvironment(mode="step")
|
| 92 |
+
obs = env.reset(target_name=target)
|
| 93 |
+
|
| 94 |
+
if not completion:
|
| 95 |
+
return {"prompt": obs["prompt"], "steps": [], "target": env.target}
|
| 96 |
+
|
| 97 |
+
try:
|
| 98 |
+
folds = parse_fold_list(completion)
|
| 99 |
+
except ValueError as e:
|
| 100 |
+
return {"error": str(e), "steps": []}
|
| 101 |
+
|
| 102 |
+
steps = []
|
| 103 |
+
for i, fold in enumerate(folds):
|
| 104 |
+
result = env.paper.add_crease(fold["from"], fold["to"], fold["assignment"])
|
| 105 |
+
reward = compute_reward(env.paper, result, env.target)
|
| 106 |
+
|
| 107 |
+
paper_state = {
|
| 108 |
+
"vertices": {str(k): list(v) for k, v in env.paper.graph.vertices.items()},
|
| 109 |
+
"edges": [
|
| 110 |
+
{
|
| 111 |
+
"id": k,
|
| 112 |
+
"v1": list(env.paper.graph.vertices[v[0]]),
|
| 113 |
+
"v2": list(env.paper.graph.vertices[v[1]]),
|
| 114 |
+
"assignment": v[2],
|
| 115 |
+
}
|
| 116 |
+
for k, v in env.paper.graph.edges.items()
|
| 117 |
+
],
|
| 118 |
+
"anchor_points": [list(p) for p in env.paper.anchor_points()],
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
# Build per-step prompt reflecting current state
|
| 122 |
+
from env.prompts import step_level_prompt
|
| 123 |
+
step_prompt = step_level_prompt(
|
| 124 |
+
target=env.target,
|
| 125 |
+
paper_state=env.paper,
|
| 126 |
+
step=i + 1,
|
| 127 |
+
max_steps=env.max_steps,
|
| 128 |
+
last_reward=reward,
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
steps.append({
|
| 132 |
+
"step": i + 1,
|
| 133 |
+
"fold": {
|
| 134 |
+
"from_point": fold["from"],
|
| 135 |
+
"to_point": fold["to"],
|
| 136 |
+
"assignment": fold["assignment"],
|
| 137 |
+
"instruction": fold.get("instruction", ""),
|
| 138 |
+
},
|
| 139 |
+
"paper_state": paper_state,
|
| 140 |
+
"anchor_points": [list(p) for p in env.paper.anchor_points()],
|
| 141 |
+
"reward": reward,
|
| 142 |
+
"done": reward.get("completion", 0) > 0,
|
| 143 |
+
"info": env._info(),
|
| 144 |
+
"prompt": step_prompt,
|
| 145 |
+
})
|
| 146 |
+
|
| 147 |
+
if reward.get("completion", 0) > 0:
|
| 148 |
+
break
|
| 149 |
+
|
| 150 |
+
return {
|
| 151 |
+
"target_name": target,
|
| 152 |
+
"target": env.target,
|
| 153 |
+
"steps": steps,
|
| 154 |
+
"final_reward": steps[-1]["reward"] if steps else {},
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
@app.get("/episode/demo")
|
| 159 |
+
def demo_episode(target: str = "half_horizontal"):
|
| 160 |
+
"""Return a pre-solved demo episode for each target."""
|
| 161 |
+
DEMO_COMPLETIONS = {
|
| 162 |
+
"half_horizontal": '<folds>[{"instruction": "Valley fold along horizontal center line", "from": [0, 0.5], "to": [1, 0.5], "assignment": "V"}]</folds>',
|
| 163 |
+
"half_vertical": '<folds>[{"instruction": "Mountain fold along vertical center line", "from": [0.5, 0], "to": [0.5, 1], "assignment": "M"}]</folds>',
|
| 164 |
+
"diagonal_main": '<folds>[{"instruction": "Valley fold along main diagonal", "from": [0, 0], "to": [1, 1], "assignment": "V"}]</folds>',
|
| 165 |
+
"diagonal_anti": '<folds>[{"instruction": "Mountain fold along anti-diagonal", "from": [1, 0], "to": [0, 1], "assignment": "M"}]</folds>',
|
| 166 |
+
"thirds_h": '<folds>[{"instruction": "Valley fold at one-third height", "from": [0, 0.333], "to": [1, 0.333], "assignment": "V"}, {"instruction": "Valley fold at two-thirds height", "from": [0, 0.667], "to": [1, 0.667], "assignment": "V"}]</folds>',
|
| 167 |
+
"thirds_v": '<folds>[{"instruction": "Mountain fold at one-third width", "from": [0.333, 0], "to": [0.333, 1], "assignment": "M"}, {"instruction": "Mountain fold at two-thirds width", "from": [0.667, 0], "to": [0.667, 1], "assignment": "M"}]</folds>',
|
| 168 |
+
"accordion_3h": '<folds>[{"instruction": "Valley fold at quarter height", "from": [0, 0.25], "to": [1, 0.25], "assignment": "V"}, {"instruction": "Mountain fold at half height", "from": [0, 0.5], "to": [1, 0.5], "assignment": "M"}, {"instruction": "Valley fold at three-quarter height", "from": [0, 0.75], "to": [1, 0.75], "assignment": "V"}]</folds>',
|
| 169 |
+
"accordion_4h": '<folds>[{"instruction": "Valley fold at 0.2", "from": [0, 0.2], "to": [1, 0.2], "assignment": "V"}, {"instruction": "Mountain fold at 0.4", "from": [0, 0.4], "to": [1, 0.4], "assignment": "M"}, {"instruction": "Valley fold at 0.6", "from": [0, 0.6], "to": [1, 0.6], "assignment": "V"}, {"instruction": "Mountain fold at 0.8", "from": [0, 0.8], "to": [1, 0.8], "assignment": "M"}]</folds>',
|
| 170 |
+
}
|
| 171 |
+
completion = DEMO_COMPLETIONS.get(target, DEMO_COMPLETIONS["half_horizontal"])
|
| 172 |
+
return run_episode(target=target, completion=completion)
|
|
File without changes
|
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Matplotlib 3D animation of origami folding using OrigamiSimulator.
|
| 3 |
+
|
| 4 |
+
Usage:
|
| 5 |
+
python -m sim.animate [target_name]
|
| 6 |
+
|
| 7 |
+
target_name defaults to 'half_horizontal', resolved against
|
| 8 |
+
env/targets/<target_name>.fold relative to this file's parent directory.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from __future__ import annotations
|
| 12 |
+
|
| 13 |
+
import json
|
| 14 |
+
import sys
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
|
| 17 |
+
import matplotlib.pyplot as plt
|
| 18 |
+
import matplotlib.animation as animation
|
| 19 |
+
import numpy as np
|
| 20 |
+
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
|
| 21 |
+
|
| 22 |
+
from .simulator import OrigamiSimulator
|
| 23 |
+
|
| 24 |
+
# ── Design system colours ─────────────────────────────────────────────────────
# Hex palette shared with the dashboard's dark theme.
BG_COLOR = '#0d0d14'      # figure background
AX_COLOR = '#13131d'      # 3-D axes background
PAPER_FACE = '#fafaf5'    # paper surface fill
PAPER_EDGE = '#2a2a3a'    # mesh edge lines
MOUNTAIN_CLR = '#f59e0b'  # amber — mountain creases
VALLEY_CLR = '#38bdf8'    # sky — valley creases
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# ── Public API ────────────────────────────────────────────────────────────────
|
| 34 |
+
|
| 35 |
+
def animate_fold(fold_file: str,
                 n_frames: int = 80,
                 steps_per_frame: int = 40,
                 target_name: str = 'origami') -> None:
    """
    Animate folding from 0% → 100% → 0% in a triangle-wave loop.

    Parameters
    ----------
    fold_file : str
        Path to the .fold JSON file.
    n_frames : int
        Total animation frames (default 80 → ~40 in, 40 out).
    steps_per_frame : int
        Physics steps executed per frame.
    target_name : str
        Display name shown in the title.
    """
    fold_data = json.loads(Path(fold_file).read_text())
    sim = OrigamiSimulator(fold_data, subdivisions=2)

    # Triangle-wave fold percents: 0 → 1 → 0
    half = n_frames // 2
    fold_percents = np.concatenate([
        np.linspace(0.0, 1.0, half),
        np.linspace(1.0, 0.0, n_frames - half),
    ])

    # ── Figure setup ──────────────────────────────────────────────────────────
    fig = plt.figure(figsize=(9, 7), facecolor=BG_COLOR)
    ax = fig.add_subplot(111, projection='3d')

    def _style_axes() -> None:
        # Shared axis styling, applied at setup and again after every
        # ax.clear() (clear() resets panes/grid/axis visibility).
        ax.set_facecolor(AX_COLOR)
        ax.xaxis.pane.fill = False
        ax.yaxis.pane.fill = False
        ax.zaxis.pane.fill = False
        ax.grid(False)
        ax.set_axis_off()

    _style_axes()

    def update(frame: int) -> list:
        pct = fold_percents[frame]
        sim.set_fold_percent(pct)
        sim.step(steps_per_frame)

        ax.clear()
        _style_axes()

        # ── Paper surface ─────────────────────────────────────────────────
        verts = [sim.pos[tri] for tri in sim.triangles]
        poly = Poly3DCollection(
            verts,
            alpha=0.85,
            facecolor=PAPER_FACE,
            edgecolor=PAPER_EDGE,
            linewidth=0.2,
            zorder=1,
        )
        ax.add_collection3d(poly)

        # ── Crease / fold edges ───────────────────────────────────────────
        for i in range(len(sim._crease_a)):
            if sim._crease_assign[i] not in ('M', 'V'):
                continue
            a, b = sim._crease_a[i], sim._crease_b[i]
            color = MOUNTAIN_CLR if sim._crease_assign[i] == 'M' else VALLEY_CLR
            ax.plot(
                [sim.pos[a, 0], sim.pos[b, 0]],
                [sim.pos[a, 1], sim.pos[b, 1]],
                [sim.pos[a, 2], sim.pos[b, 2]],
                color=color,
                linewidth=2.5,
                zorder=2,
            )

        # ── Axis limits & style ───────────────────────────────────────────
        ax.set_xlim(-0.2, 1.2)
        ax.set_ylim(-0.2, 1.2)
        ax.set_zlim(-0.6, 0.6)
        ax.set_box_aspect([1.4, 1.4, 1.0])
        ax.set_title(
            f'OPTIGAMI — {target_name} fold: {pct * 100:.0f}%',
            color='#e0e0f0',
            fontsize=13,
            pad=10,
        )

        return []

    # Hold a reference: FuncAnimation is driven by a timer and would be
    # garbage-collected (stopping the animation) if not kept alive here.
    ani = animation.FuncAnimation(
        fig,
        update,
        frames=n_frames,
        interval=40,  # ms between frames (~25 fps)
        blit=False,
    )

    plt.tight_layout()
    plt.show()
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def main() -> None:
    """Resolve the requested target's .fold file and launch the animation."""
    name = sys.argv[1] if len(sys.argv) > 1 else 'half_horizontal'
    fold_file = Path(__file__).parent.parent / 'env' / 'targets' / f'{name}.fold'
    if not fold_file.exists():
        print(f'Error: fold file not found: {fold_file}', file=sys.stderr)
        sys.exit(1)
    animate_fold(str(fold_file), target_name=name)


if __name__ == '__main__':
    main()
|
|
@@ -0,0 +1,406 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Origami mass-spring dynamic relaxation simulator.
|
| 3 |
+
|
| 4 |
+
Based on: Ghassaei et al., "Fast, Interactive Origami Simulation using GPU
|
| 5 |
+
Computation", 7OSME 2018.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
from scipy.spatial import Delaunay
|
| 12 |
+
|
| 13 |
+
# ── Physics constants ────────────────────────────────────────────────────────
# Spring/damping coefficients for the dynamic-relaxation integrator below.

AXIAL_STIFFNESS = 20.0   # K = AXIAL_STIFFNESS / rest_length
CREASE_STIFFNESS = 0.7   # K = CREASE_STIFFNESS * edge_length (M/V creases)
PANEL_STIFFNESS = 0.7    # K = PANEL_STIFFNESS * edge_length (F / panel edges)
PERCENT_DAMPING = 0.45   # global viscous damping fraction
DT = 0.002               # timestep (seconds)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# ── Geometry helpers ─────────────────────────────────────────────────────────
|
| 23 |
+
|
| 24 |
+
def _normalize(v: np.ndarray) -> np.ndarray:
|
| 25 |
+
n = np.linalg.norm(v)
|
| 26 |
+
return v / n if n > 1e-12 else v
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def _triangulate_faces(faces_vertices: list[list[int]]) -> np.ndarray:
|
| 30 |
+
"""Fan-triangulate polygonal faces (triangles and quads supported)."""
|
| 31 |
+
tris = []
|
| 32 |
+
for face in faces_vertices:
|
| 33 |
+
if len(face) == 3:
|
| 34 |
+
tris.append(face)
|
| 35 |
+
elif len(face) == 4:
|
| 36 |
+
a, b, c, d = face
|
| 37 |
+
tris.append([a, b, c])
|
| 38 |
+
tris.append([a, c, d])
|
| 39 |
+
else:
|
| 40 |
+
# General fan triangulation for n-gons
|
| 41 |
+
for k in range(1, len(face) - 1):
|
| 42 |
+
tris.append([face[0], face[k], face[k + 1]])
|
| 43 |
+
return np.array(tris, dtype=np.int32)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _point_on_segment(p: np.ndarray, p0: np.ndarray, p1: np.ndarray,
|
| 47 |
+
tol: float = 1e-6) -> bool:
|
| 48 |
+
seg = p1 - p0
|
| 49 |
+
seg_len = np.linalg.norm(seg)
|
| 50 |
+
if seg_len < 1e-10:
|
| 51 |
+
return False
|
| 52 |
+
seg_dir = seg / seg_len
|
| 53 |
+
t = np.dot(p - p0, seg_dir)
|
| 54 |
+
perp = (p - p0) - t * seg_dir
|
| 55 |
+
return -tol <= t <= seg_len + tol and np.linalg.norm(perp) < tol
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# ── Mesh subdivision ──────────────────────────────────────────────────────────
|
| 59 |
+
|
| 60 |
+
def _subdivide(pos2d: np.ndarray, triangles: np.ndarray
|
| 61 |
+
) -> tuple[np.ndarray, np.ndarray]:
|
| 62 |
+
"""Split each triangle into 4 by inserting edge midpoints."""
|
| 63 |
+
midpoint_cache: dict[tuple[int, int], int] = {}
|
| 64 |
+
new_pos = list(pos2d)
|
| 65 |
+
new_tris = []
|
| 66 |
+
|
| 67 |
+
def get_mid(i: int, j: int) -> int:
|
| 68 |
+
key = (min(i, j), max(i, j))
|
| 69 |
+
if key not in midpoint_cache:
|
| 70 |
+
mid = (np.array(new_pos[i]) + np.array(new_pos[j])) / 2.0
|
| 71 |
+
midpoint_cache[key] = len(new_pos)
|
| 72 |
+
new_pos.append(mid)
|
| 73 |
+
return midpoint_cache[key]
|
| 74 |
+
|
| 75 |
+
for tri in triangles:
|
| 76 |
+
a, b, c = tri
|
| 77 |
+
ab = get_mid(a, b)
|
| 78 |
+
bc = get_mid(b, c)
|
| 79 |
+
ca = get_mid(c, a)
|
| 80 |
+
new_tris.extend([
|
| 81 |
+
[a, ab, ca],
|
| 82 |
+
[ab, b, bc],
|
| 83 |
+
[ca, bc, c ],
|
| 84 |
+
[ab, bc, ca],
|
| 85 |
+
])
|
| 86 |
+
|
| 87 |
+
return np.array(new_pos, dtype=np.float64), np.array(new_tris, dtype=np.int32)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
# ── Main simulator ────────────────────────────────────────────────────────────
|
| 91 |
+
|
| 92 |
+
class OrigamiSimulator:
    """
    Mass-spring dynamic relaxation simulator for origami.

    Based on Ghassaei et al., "Fast, Interactive Origami Simulation using GPU
    Computation", 7OSME 2018: every mesh edge carries an axial spring; every
    interior edge carries a torsional spring whose target dihedral angle is
    scaled by the current fold percent.

    Parameters
    ----------
    fold_data : dict
        Parsed FOLD JSON with keys: vertices_coords, edges_vertices,
        edges_assignment (faces_vertices is used when present).
    subdivisions : int
        Number of midpoint subdivision passes (default 2 → 4× mesh density).
    """

    def __init__(self, fold_data: dict, subdivisions: int = 2) -> None:
        self._fold_percent = 0.0
        self._build(fold_data, subdivisions)

    # ── Public API ────────────────────────────────────────────────────────────

    def set_fold_percent(self, percent: float) -> None:
        """Update all crease spring target angles (0.0 = flat, 1.0 = fully folded)."""
        self._fold_percent = float(percent)
        self._crease_target = self._fold_percent * self._crease_full_theta

    def step(self, n_steps: int = 50) -> None:
        """Advance the simulation by n_steps Euler integration steps."""
        for _ in range(n_steps):
            self._euler_step()

    def reset(self) -> None:
        """Reset to flat state (z=0, vel=0), preserving current fold percent."""
        self.pos = self._flat_pos.copy()
        self.vel[:] = 0.0

    @property
    def crease_indices(self) -> list[tuple[int, int, str]]:
        """Return list of (a, b, assignment) for all crease springs."""
        return list(zip(
            self._crease_a.tolist(),
            self._crease_b.tolist(),
            self._crease_assign,
        ))

    # ── Build ─────────────────────────────────────────────────────────────────

    def _build(self, fold_data: dict, subdivisions: int) -> None:
        """Construct the mesh, springs, and per-node masses from FOLD data."""
        coords = fold_data['vertices_coords']
        orig_edges = fold_data['edges_vertices']
        orig_assign = fold_data['edges_assignment']

        # Original 2-D positions
        pts2d = np.array([[x, y] for x, y in coords], dtype=np.float64)

        # Build triangles from faces_vertices when available (preferred: ensures
        # crease edges appear as actual mesh edges after subdivision).
        # Fall back to Delaunay only if faces_vertices is absent.
        if 'faces_vertices' in fold_data:
            triangles = _triangulate_faces(fold_data['faces_vertices'])
        else:
            tri = Delaunay(pts2d)
            triangles = tri.simplices.astype(np.int32)

        # Build original crease segments for later classification.
        # Only M and V assignments are actual fold creases; B is boundary.
        orig_creases: list[tuple[np.ndarray, np.ndarray, str]] = []
        for (u, v), asgn in zip(orig_edges, orig_assign):
            if asgn in ('M', 'V'):
                orig_creases.append((pts2d[u], pts2d[v], asgn))

        # Midpoint subdivision passes
        pos2d = pts2d.copy()
        for _ in range(subdivisions):
            pos2d, triangles = _subdivide(pos2d, triangles)

        n = len(pos2d)

        # 3-D positions (flat, z=0)
        pos3d = np.zeros((n, 3), dtype=np.float64)
        pos3d[:, :2] = pos2d

        self.pos = pos3d
        self._flat_pos = pos3d.copy()
        self.vel = np.zeros((n, 3), dtype=np.float64)
        self.triangles = triangles

        self._build_beams(triangles)
        self._build_masses(triangles)
        self._build_creases(triangles, pos2d, orig_creases)

    def _build_beams(self, triangles: np.ndarray) -> None:
        """Collect all unique triangle edges as structural (axial) springs."""
        edge_set: set[tuple[int, int]] = set()
        for tri in triangles:
            a, b, c = tri
            for i, j in [(a, b), (b, c), (c, a)]:
                edge_set.add((min(i, j), max(i, j)))

        edges = np.array(sorted(edge_set), dtype=np.int32)
        i_arr = edges[:, 0]
        j_arr = edges[:, 1]

        rest = np.linalg.norm(self.pos[i_arr] - self.pos[j_arr], axis=1)
        # Stiffness inversely proportional to rest length (K = EA / L form).
        K = AXIAL_STIFFNESS / np.maximum(rest, 1e-12)

        self._beam_i = i_arr
        self._beam_j = j_arr
        self._beam_rest = rest
        self._beam_K = K

    def _build_masses(self, triangles: np.ndarray) -> None:
        """Mass per node = sum of (adjacent triangle area / 3)."""
        n = len(self.pos)
        mass = np.zeros(n, dtype=np.float64)
        for tri in triangles:
            a, b, c = tri
            pa, pb, pc = self.pos[a], self.pos[b], self.pos[c]
            area = 0.5 * np.linalg.norm(np.cross(pb - pa, pc - pa))
            mass[a] += area / 3.0
            mass[b] += area / 3.0
            mass[c] += area / 3.0
        # Guard against zero-mass nodes (degenerate triangles)
        mass = np.maximum(mass, 1e-12)
        self.mass = mass

    def _build_creases(self, triangles: np.ndarray, pos2d: np.ndarray,
                       orig_creases: list[tuple[np.ndarray, np.ndarray, str]]
                       ) -> None:
        """
        Identify interior edges (shared by exactly 2 triangles) and classify
        them as M/V fold creases or F panel springs.
        """
        # Map each canonical edge → list of triangle indices containing it.
        # (The original also built a parallel tri_edge_map that was never
        # read; that dead work is removed.)
        edge_to_tris: dict[tuple[int, int], list[int]] = {}

        for t_idx, tri in enumerate(triangles):
            a, b, c = tri
            for ei, ej in (
                (min(a, b), max(a, b)),
                (min(b, c), max(b, c)),
                (min(c, a), max(c, a)),
            ):
                edge_to_tris.setdefault((ei, ej), []).append(t_idx)

        crease_a: list[int] = []
        crease_b: list[int] = []
        crease_c: list[int] = []
        crease_d: list[int] = []
        crease_assign: list[str] = []
        crease_full_theta: list[float] = []
        crease_K: list[float] = []

        for edge_key, t_indices in edge_to_tris.items():
            if len(t_indices) != 2:
                continue  # boundary edge

            ei, ej = edge_key
            # Find the node opposite the shared edge in each triangle.
            opp_nodes = [None, None]
            for t_pos, t_idx in enumerate(t_indices):
                tri = triangles[t_idx]
                for node in tri:
                    if node != ei and node != ej:
                        opp_nodes[t_pos] = node
                        break

            c_node = opp_nodes[0]
            d_node = opp_nodes[1]
            if c_node is None or d_node is None:
                continue

            # Classify: check if both endpoints lie on the same original crease segment
            pi = pos2d[ei]
            pj = pos2d[ej]
            asgn = 'F'
            for p0, p1, crease_type in orig_creases:
                if _point_on_segment(pi, p0, p1) and _point_on_segment(pj, p0, p1):
                    asgn = crease_type
                    break

            if asgn == 'M':
                full_theta = +np.pi
                K = CREASE_STIFFNESS * np.linalg.norm(pos2d[ej] - pos2d[ei])
            elif asgn == 'V':
                full_theta = -np.pi
                K = CREASE_STIFFNESS * np.linalg.norm(pos2d[ej] - pos2d[ei])
            else:  # 'F' panel
                full_theta = 0.0
                K = PANEL_STIFFNESS * np.linalg.norm(pos2d[ej] - pos2d[ei])

            crease_a.append(ei)
            crease_b.append(ej)
            crease_c.append(c_node)
            crease_d.append(d_node)
            crease_assign.append(asgn)
            crease_full_theta.append(full_theta)
            crease_K.append(K)

        self._crease_a = np.array(crease_a, dtype=np.int32)
        self._crease_b = np.array(crease_b, dtype=np.int32)
        self._crease_c = np.array(crease_c, dtype=np.int32)
        self._crease_d = np.array(crease_d, dtype=np.int32)
        self._crease_assign = crease_assign
        self._crease_full_theta = np.array(crease_full_theta, dtype=np.float64)
        self._crease_K = np.array(crease_K, dtype=np.float64)
        self._crease_target = np.zeros(len(crease_a), dtype=np.float64)

    # ── Physics ───────────────────────────────────────────────────────────────

    def _beam_forces(self) -> np.ndarray:
        """Vectorized axial spring forces for all beams."""
        n = len(self.pos)
        forces = np.zeros((n, 3), dtype=np.float64)

        pi = self.pos[self._beam_i]
        pj = self.pos[self._beam_j]
        diff = pj - pi
        lengths = np.linalg.norm(diff, axis=1, keepdims=True)
        lengths = np.maximum(lengths, 1e-12)
        unit = diff / lengths

        stretch = lengths[:, 0] - self._beam_rest
        F_mag = self._beam_K * stretch  # scalar force magnitude

        # Damping along the edge
        vi = self.vel[self._beam_i]
        vj = self.vel[self._beam_j]
        rel_vel = np.sum((vj - vi) * unit, axis=1)
        damp_mag = PERCENT_DAMPING * rel_vel
        F_total = (F_mag + damp_mag)[:, None] * unit

        # Nodes may appear in many beams: scatter-add with np.add.at.
        np.add.at(forces, self._beam_i, F_total)
        np.add.at(forces, self._beam_j, -F_total)
        return forces

    def _crease_forces(self) -> np.ndarray:
        """Torsional spring forces for all crease/panel edges (Python loop)."""
        n = len(self.pos)
        forces = np.zeros((n, 3), dtype=np.float64)

        pos = self.pos
        for idx in range(len(self._crease_a)):
            a = self._crease_a[idx]
            b = self._crease_b[idx]
            c = self._crease_c[idx]
            d = self._crease_d[idx]
            K = self._crease_K[idx]
            target = self._crease_target[idx]

            pa, pb, pc, pd = pos[a], pos[b], pos[c], pos[d]

            edge_vec = pb - pa
            edge_len = np.linalg.norm(edge_vec)
            if edge_len < 1e-12:
                continue
            edge_dir = edge_vec / edge_len

            # Face normals
            n1_raw = np.cross(pb - pa, pc - pa)
            n2_raw = np.cross(pa - pb, pd - pb)
            n1_len = np.linalg.norm(n1_raw)
            n2_len = np.linalg.norm(n2_raw)
            if n1_len < 1e-12 or n2_len < 1e-12:
                continue
            n1 = n1_raw / n1_len
            n2 = n2_raw / n2_len

            # Signed dihedral angle via atan2 (robust near 0 and ±π)
            cross_n = np.cross(n1, n2)
            sin_theta = np.dot(cross_n, edge_dir)
            cos_theta = np.dot(n1, n2)
            theta = np.arctan2(sin_theta, cos_theta)

            delta = theta - target
            torque = -K * delta

            # Moment arms (perpendicular distance from c, d to crease line)
            vc = pc - pa
            vd = pd - pa
            vc_perp = vc - np.dot(vc, edge_dir) * edge_dir
            vd_perp = vd - np.dot(vd, edge_dir) * edge_dir
            h_c = np.linalg.norm(vc_perp)
            h_d = np.linalg.norm(vd_perp)
            if h_c < 1e-12 or h_d < 1e-12:
                continue

            # Forces on opposite nodes
            F_c = (torque / h_c) * n1
            F_d = -(torque / h_d) * n2

            # Reaction on crease nodes (moment balance)
            proj_c = np.dot(pc - pa, edge_dir)
            proj_d = np.dot(pd - pa, edge_dir)
            coef_c_a = 1.0 - proj_c / edge_len
            coef_c_b = proj_c / edge_len
            coef_d_a = 1.0 - proj_d / edge_len
            coef_d_b = proj_d / edge_len

            forces[c] += F_c
            forces[d] += F_d
            forces[a] -= coef_c_a * F_c + coef_d_a * F_d
            forces[b] -= coef_c_b * F_c + coef_d_b * F_d

        return forces

    def _euler_step(self) -> None:
        """One explicit-Euler step with global viscous velocity damping."""
        forces = self._beam_forces() + self._crease_forces()
        accel = forces / self.mass[:, None]
        vel_new = self.vel + accel * DT
        vel_new *= (1.0 - PERCENT_DAMPING * DT)
        self.pos += vel_new * DT
        self.vel = vel_new
|
|
@@ -1,38 +1,548 @@
|
|
| 1 |
-
|
| 2 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
}
|
| 4 |
|
| 5 |
-
.
|
| 6 |
-
|
| 7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
}
|
| 9 |
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
animation: App-logo-spin infinite 20s linear;
|
| 13 |
-
}
|
| 14 |
}
|
| 15 |
|
| 16 |
-
.
|
| 17 |
-
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
display: flex;
|
| 20 |
flex-direction: column;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
align-items: center;
|
| 22 |
justify-content: center;
|
| 23 |
-
|
| 24 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
}
|
| 26 |
|
| 27 |
-
.
|
| 28 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
}
|
| 30 |
|
| 31 |
-
@keyframes
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
}
|
|
|
|
| 1 |
+
:root {
|
| 2 |
+
--bg: #0d0d14;
|
| 3 |
+
--surface: #13131d;
|
| 4 |
+
--surface-2: #1a1a2e;
|
| 5 |
+
--paper-white: #fafaf5;
|
| 6 |
+
--paper-edge: #2a2a3a;
|
| 7 |
+
--mountain: #f59e0b;
|
| 8 |
+
--valley: #38bdf8;
|
| 9 |
+
--target-ghost: rgba(124, 58, 237, 0.20);
|
| 10 |
+
--target-ghost-stroke: rgba(124, 58, 237, 0.45);
|
| 11 |
+
--validity: #22d3ee;
|
| 12 |
+
--progress: #22c55e;
|
| 13 |
+
--economy: #a78bfa;
|
| 14 |
+
--text-primary: #f8fafc;
|
| 15 |
+
--text-dim: #64748b;
|
| 16 |
+
--border: #2a2a3a;
|
| 17 |
+
--border-bright: #3a3a5a;
|
| 18 |
+
--font-display: 'JetBrains Mono', monospace;
|
| 19 |
+
--font-mono: 'IBM Plex Mono', monospace;
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
.app {
|
| 23 |
+
display: flex;
|
| 24 |
+
flex-direction: column;
|
| 25 |
+
height: 100vh;
|
| 26 |
+
background: var(--bg);
|
| 27 |
+
overflow: hidden;
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
/* ─── HEADER ─── */
|
| 31 |
+
.app-header {
|
| 32 |
+
display: flex;
|
| 33 |
+
align-items: center;
|
| 34 |
+
gap: 24px;
|
| 35 |
+
padding: 0 20px;
|
| 36 |
+
height: 48px;
|
| 37 |
+
border-bottom: 1px solid var(--border);
|
| 38 |
+
background: var(--surface);
|
| 39 |
+
flex-shrink: 0;
|
| 40 |
+
z-index: 10;
|
| 41 |
}
|
| 42 |
|
| 43 |
+
.app-title {
|
| 44 |
+
font-family: var(--font-display);
|
| 45 |
+
font-size: 14px;
|
| 46 |
+
font-weight: 700;
|
| 47 |
+
letter-spacing: 0.12em;
|
| 48 |
+
color: var(--text-primary);
|
| 49 |
+
white-space: nowrap;
|
| 50 |
}
|
| 51 |
|
| 52 |
+
.app-title .title-accent {
|
| 53 |
+
color: var(--mountain);
|
|
|
|
|
|
|
| 54 |
}
|
| 55 |
|
| 56 |
+
.header-sep {
|
| 57 |
+
width: 1px;
|
| 58 |
+
height: 24px;
|
| 59 |
+
background: var(--border);
|
| 60 |
+
flex-shrink: 0;
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
.header-right {
|
| 64 |
+
display: flex;
|
| 65 |
+
align-items: center;
|
| 66 |
+
gap: 16px;
|
| 67 |
+
margin-left: auto;
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
.api-status {
|
| 71 |
+
font-size: 11px;
|
| 72 |
+
font-family: var(--font-display);
|
| 73 |
+
letter-spacing: 0.08em;
|
| 74 |
+
display: flex;
|
| 75 |
+
align-items: center;
|
| 76 |
+
gap: 6px;
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
.api-status-dot {
|
| 80 |
+
width: 6px;
|
| 81 |
+
height: 6px;
|
| 82 |
+
border-radius: 50%;
|
| 83 |
+
background: var(--text-dim);
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
.api-status-dot.ok {
|
| 87 |
+
background: var(--progress);
|
| 88 |
+
box-shadow: 0 0 6px var(--progress);
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
.api-status-dot.err {
|
| 92 |
+
background: #ef4444;
|
| 93 |
+
box-shadow: 0 0 6px #ef4444;
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
/* ─── MAIN LAYOUT ─── */
|
| 97 |
+
.app-body {
|
| 98 |
+
display: grid;
|
| 99 |
+
grid-template-columns: 1fr 280px;
|
| 100 |
+
flex: 1;
|
| 101 |
+
overflow: hidden;
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
.app-left {
|
| 105 |
+
display: flex;
|
| 106 |
+
flex-direction: column;
|
| 107 |
+
overflow: hidden;
|
| 108 |
+
border-right: 1px solid var(--border);
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
.app-right {
|
| 112 |
+
display: flex;
|
| 113 |
+
flex-direction: column;
|
| 114 |
+
overflow: hidden;
|
| 115 |
+
background: var(--surface);
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
/* ─── CANVAS ROW ─── */
|
| 119 |
+
.canvas-row {
|
| 120 |
+
display: flex;
|
| 121 |
+
gap: 0;
|
| 122 |
+
padding: 16px;
|
| 123 |
+
flex-shrink: 0;
|
| 124 |
+
border-bottom: 1px solid var(--border);
|
| 125 |
+
overflow-x: auto;
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
.canvas-wrap {
|
| 129 |
display: flex;
|
| 130 |
flex-direction: column;
|
| 131 |
+
gap: 8px;
|
| 132 |
+
flex: 1;
|
| 133 |
+
min-width: 280px;
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
.canvas-wrap + .canvas-wrap {
|
| 137 |
+
margin-left: 16px;
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
.canvas-label {
|
| 141 |
+
font-family: var(--font-display);
|
| 142 |
+
font-size: 10px;
|
| 143 |
+
font-weight: 500;
|
| 144 |
+
letter-spacing: 0.14em;
|
| 145 |
+
color: var(--text-dim);
|
| 146 |
+
text-transform: uppercase;
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
.canvas-svg {
|
| 150 |
+
display: block;
|
| 151 |
+
background: var(--paper-white);
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
.canvas-3d {
|
| 155 |
+
display: block;
|
| 156 |
+
background: linear-gradient(180deg, #1a1a2e 0%, #0f101a 100%);
|
| 157 |
+
border: 1px solid var(--border);
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
.canvas-label-row {
|
| 161 |
+
display: flex;
|
| 162 |
+
align-items: center;
|
| 163 |
+
justify-content: space-between;
|
| 164 |
+
gap: 10px;
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
.fold-mode-toggle {
|
| 168 |
+
display: inline-flex;
|
| 169 |
+
border: 1px solid var(--border);
|
| 170 |
+
background: var(--surface);
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
.fold-mode-btn {
|
| 174 |
+
border: none;
|
| 175 |
+
background: transparent;
|
| 176 |
+
color: var(--text-dim);
|
| 177 |
+
font-family: var(--font-display);
|
| 178 |
+
font-size: 9px;
|
| 179 |
+
letter-spacing: 0.08em;
|
| 180 |
+
padding: 3px 7px;
|
| 181 |
+
cursor: pointer;
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
.fold-mode-btn + .fold-mode-btn {
|
| 185 |
+
border-left: 1px solid var(--border);
|
| 186 |
+
}
|
| 187 |
+
|
| 188 |
+
.fold-mode-btn.active {
|
| 189 |
+
color: var(--text-primary);
|
| 190 |
+
background: #1f2538;
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
/* ─── STEP FEED ─── */
|
| 194 |
+
.step-feed-section {
|
| 195 |
+
flex: 1;
|
| 196 |
+
display: flex;
|
| 197 |
+
flex-direction: column;
|
| 198 |
+
overflow: hidden;
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
.section-header {
|
| 202 |
+
font-family: var(--font-display);
|
| 203 |
+
font-size: 10px;
|
| 204 |
+
font-weight: 500;
|
| 205 |
+
letter-spacing: 0.14em;
|
| 206 |
+
color: var(--text-dim);
|
| 207 |
+
text-transform: uppercase;
|
| 208 |
+
padding: 8px 16px;
|
| 209 |
+
border-bottom: 1px solid var(--border);
|
| 210 |
+
flex-shrink: 0;
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
.step-feed {
|
| 214 |
+
overflow-y: auto;
|
| 215 |
+
flex: 1;
|
| 216 |
+
padding: 4px 0;
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
.step-entry {
|
| 220 |
+
display: flex;
|
| 221 |
+
flex-direction: column;
|
| 222 |
+
gap: 2px;
|
| 223 |
+
padding: 8px 16px;
|
| 224 |
+
border-bottom: 1px solid var(--border);
|
| 225 |
+
cursor: default;
|
| 226 |
+
transition: background 0.1s;
|
| 227 |
+
}
|
| 228 |
+
|
| 229 |
+
.step-entry:hover {
|
| 230 |
+
background: var(--surface);
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
.step-entry.active {
|
| 234 |
+
background: var(--surface-2);
|
| 235 |
+
border-left: 2px solid var(--valley);
|
| 236 |
+
padding-left: 14px;
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
.step-entry-top {
|
| 240 |
+
display: flex;
|
| 241 |
+
align-items: center;
|
| 242 |
+
gap: 8px;
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
.step-num {
|
| 246 |
+
font-family: var(--font-display);
|
| 247 |
+
font-size: 10px;
|
| 248 |
+
font-weight: 700;
|
| 249 |
+
color: var(--text-dim);
|
| 250 |
+
width: 24px;
|
| 251 |
+
flex-shrink: 0;
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
.step-instruction {
|
| 255 |
+
font-size: 12px;
|
| 256 |
+
color: var(--text-primary);
|
| 257 |
+
flex: 1;
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
.assign-badge {
|
| 261 |
+
font-family: var(--font-display);
|
| 262 |
+
font-size: 10px;
|
| 263 |
+
font-weight: 700;
|
| 264 |
+
padding: 1px 5px;
|
| 265 |
+
line-height: 1.4;
|
| 266 |
+
flex-shrink: 0;
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
.assign-badge.M {
|
| 270 |
+
background: var(--mountain);
|
| 271 |
+
color: #0d0d14;
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
.assign-badge.V {
|
| 275 |
+
background: var(--valley);
|
| 276 |
+
color: #0d0d14;
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
.assign-badge.B {
|
| 280 |
+
background: var(--border-bright);
|
| 281 |
+
color: var(--text-dim);
|
| 282 |
+
}
|
| 283 |
+
|
| 284 |
+
.step-reward-delta {
|
| 285 |
+
font-size: 11px;
|
| 286 |
+
color: var(--text-dim);
|
| 287 |
+
padding-left: 32px;
|
| 288 |
+
}
|
| 289 |
+
|
| 290 |
+
.step-reward-delta .delta-positive {
|
| 291 |
+
color: var(--progress);
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
.step-reward-delta .delta-negative {
|
| 295 |
+
color: #ef4444;
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
/* ─── REWARD PANEL ─── */
|
| 299 |
+
.reward-panel {
|
| 300 |
+
padding: 12px 16px;
|
| 301 |
+
border-bottom: 1px solid var(--border);
|
| 302 |
+
flex-shrink: 0;
|
| 303 |
+
}
|
| 304 |
+
|
| 305 |
+
.reward-row {
|
| 306 |
+
display: flex;
|
| 307 |
+
align-items: center;
|
| 308 |
+
gap: 8px;
|
| 309 |
+
margin-bottom: 6px;
|
| 310 |
+
}
|
| 311 |
+
|
| 312 |
+
.reward-row:last-child {
|
| 313 |
+
margin-bottom: 0;
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
.reward-label {
|
| 317 |
+
font-family: var(--font-display);
|
| 318 |
+
font-size: 10px;
|
| 319 |
+
font-weight: 500;
|
| 320 |
+
letter-spacing: 0.06em;
|
| 321 |
+
color: var(--text-dim);
|
| 322 |
+
width: 72px;
|
| 323 |
+
flex-shrink: 0;
|
| 324 |
+
text-transform: uppercase;
|
| 325 |
+
}
|
| 326 |
+
|
| 327 |
+
.reward-track {
|
| 328 |
+
flex: 1;
|
| 329 |
+
height: 8px;
|
| 330 |
+
background: var(--bg);
|
| 331 |
+
border: 1px solid var(--border);
|
| 332 |
+
overflow: hidden;
|
| 333 |
+
}
|
| 334 |
+
|
| 335 |
+
.reward-bar {
|
| 336 |
+
height: 100%;
|
| 337 |
+
transition: width 0.4s ease;
|
| 338 |
+
}
|
| 339 |
+
|
| 340 |
+
.reward-value {
|
| 341 |
+
font-family: var(--font-display);
|
| 342 |
+
font-size: 11px;
|
| 343 |
+
font-weight: 500;
|
| 344 |
+
color: var(--text-primary);
|
| 345 |
+
width: 36px;
|
| 346 |
+
text-align: right;
|
| 347 |
+
flex-shrink: 0;
|
| 348 |
+
}
|
| 349 |
+
|
| 350 |
+
.reward-value.dim {
|
| 351 |
+
color: var(--text-dim);
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
.reward-divider {
|
| 355 |
+
height: 1px;
|
| 356 |
+
background: var(--border);
|
| 357 |
+
margin: 6px 0;
|
| 358 |
+
}
|
| 359 |
+
|
| 360 |
+
/* ─── INFO BADGES ─── */
|
| 361 |
+
.info-badges {
|
| 362 |
+
padding: 12px 16px;
|
| 363 |
+
display: flex;
|
| 364 |
+
flex-direction: column;
|
| 365 |
+
gap: 8px;
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
.info-row {
|
| 369 |
+
display: flex;
|
| 370 |
+
align-items: center;
|
| 371 |
+
justify-content: space-between;
|
| 372 |
+
gap: 8px;
|
| 373 |
+
}
|
| 374 |
+
|
| 375 |
+
.info-key {
|
| 376 |
+
font-family: var(--font-display);
|
| 377 |
+
font-size: 10px;
|
| 378 |
+
font-weight: 500;
|
| 379 |
+
letter-spacing: 0.06em;
|
| 380 |
+
color: var(--text-dim);
|
| 381 |
+
text-transform: uppercase;
|
| 382 |
+
}
|
| 383 |
+
|
| 384 |
+
.info-val {
|
| 385 |
+
font-family: var(--font-display);
|
| 386 |
+
font-size: 11px;
|
| 387 |
+
font-weight: 700;
|
| 388 |
+
color: var(--text-primary);
|
| 389 |
+
}
|
| 390 |
+
|
| 391 |
+
.info-val.bool-true {
|
| 392 |
+
color: var(--progress);
|
| 393 |
+
}
|
| 394 |
+
|
| 395 |
+
.info-val.bool-false {
|
| 396 |
+
color: #ef4444;
|
| 397 |
+
}
|
| 398 |
+
|
| 399 |
+
.info-val.dim {
|
| 400 |
+
color: var(--text-dim);
|
| 401 |
+
}
|
| 402 |
+
|
| 403 |
+
/* ─── TARGET SELECTOR ─── */
|
| 404 |
+
.target-selector {
|
| 405 |
+
display: flex;
|
| 406 |
+
align-items: center;
|
| 407 |
+
gap: 8px;
|
| 408 |
+
}
|
| 409 |
+
|
| 410 |
+
.target-selector-label {
|
| 411 |
+
font-family: var(--font-display);
|
| 412 |
+
font-size: 10px;
|
| 413 |
+
font-weight: 500;
|
| 414 |
+
letter-spacing: 0.10em;
|
| 415 |
+
color: var(--text-dim);
|
| 416 |
+
text-transform: uppercase;
|
| 417 |
+
white-space: nowrap;
|
| 418 |
+
}
|
| 419 |
+
|
| 420 |
+
.target-select {
|
| 421 |
+
background: var(--surface-2);
|
| 422 |
+
border: 1px solid var(--border-bright);
|
| 423 |
+
color: var(--text-primary);
|
| 424 |
+
font-family: var(--font-display);
|
| 425 |
+
font-size: 11px;
|
| 426 |
+
padding: 4px 8px;
|
| 427 |
+
outline: none;
|
| 428 |
+
cursor: pointer;
|
| 429 |
+
min-width: 180px;
|
| 430 |
+
}
|
| 431 |
+
|
| 432 |
+
.target-select:focus {
|
| 433 |
+
border-color: var(--valley);
|
| 434 |
+
}
|
| 435 |
+
|
| 436 |
+
optgroup {
|
| 437 |
+
background: var(--surface);
|
| 438 |
+
color: var(--text-dim);
|
| 439 |
+
font-family: var(--font-display);
|
| 440 |
+
font-size: 10px;
|
| 441 |
+
}
|
| 442 |
+
|
| 443 |
+
option {
|
| 444 |
+
background: var(--surface-2);
|
| 445 |
+
color: var(--text-primary);
|
| 446 |
+
font-family: var(--font-display);
|
| 447 |
+
}
|
| 448 |
+
|
| 449 |
+
/* ─── PLAYER CONTROLS ─── */
|
| 450 |
+
.player-controls {
|
| 451 |
+
display: flex;
|
| 452 |
+
align-items: center;
|
| 453 |
+
gap: 6px;
|
| 454 |
+
flex-shrink: 0;
|
| 455 |
+
}
|
| 456 |
+
|
| 457 |
+
.ctrl-btn {
|
| 458 |
+
background: var(--surface-2);
|
| 459 |
+
border: 1px solid var(--border-bright);
|
| 460 |
+
color: var(--text-primary);
|
| 461 |
+
font-family: var(--font-display);
|
| 462 |
+
font-size: 11px;
|
| 463 |
+
font-weight: 500;
|
| 464 |
+
padding: 4px 10px;
|
| 465 |
+
cursor: pointer;
|
| 466 |
+
white-space: nowrap;
|
| 467 |
+
line-height: 1.4;
|
| 468 |
+
letter-spacing: 0.04em;
|
| 469 |
+
transition: background 0.1s, border-color 0.1s;
|
| 470 |
+
}
|
| 471 |
+
|
| 472 |
+
.ctrl-btn:hover:not(:disabled) {
|
| 473 |
+
background: var(--surface);
|
| 474 |
+
border-color: var(--text-dim);
|
| 475 |
+
}
|
| 476 |
+
|
| 477 |
+
.ctrl-btn:disabled {
|
| 478 |
+
opacity: 0.35;
|
| 479 |
+
cursor: not-allowed;
|
| 480 |
+
}
|
| 481 |
+
|
| 482 |
+
.ctrl-btn.play {
|
| 483 |
+
border-color: var(--valley);
|
| 484 |
+
color: var(--valley);
|
| 485 |
+
}
|
| 486 |
+
|
| 487 |
+
.ctrl-btn.play:hover:not(:disabled) {
|
| 488 |
+
background: rgba(56, 189, 248, 0.1);
|
| 489 |
+
}
|
| 490 |
+
|
| 491 |
+
.ctrl-step-display {
|
| 492 |
+
font-family: var(--font-display);
|
| 493 |
+
font-size: 11px;
|
| 494 |
+
color: var(--text-dim);
|
| 495 |
+
padding: 4px 8px;
|
| 496 |
+
border: 1px solid var(--border);
|
| 497 |
+
background: var(--bg);
|
| 498 |
+
white-space: nowrap;
|
| 499 |
+
min-width: 72px;
|
| 500 |
+
text-align: center;
|
| 501 |
+
}
|
| 502 |
+
|
| 503 |
+
/* ─── LOADING / ERROR ─── */
|
| 504 |
+
.app-overlay {
|
| 505 |
+
position: fixed;
|
| 506 |
+
inset: 0;
|
| 507 |
+
display: flex;
|
| 508 |
align-items: center;
|
| 509 |
justify-content: center;
|
| 510 |
+
background: var(--bg);
|
| 511 |
+
z-index: 100;
|
| 512 |
+
}
|
| 513 |
+
|
| 514 |
+
.overlay-message {
|
| 515 |
+
font-family: var(--font-display);
|
| 516 |
+
font-size: 13px;
|
| 517 |
+
letter-spacing: 0.1em;
|
| 518 |
+
color: var(--text-dim);
|
| 519 |
+
display: flex;
|
| 520 |
+
align-items: center;
|
| 521 |
+
gap: 12px;
|
| 522 |
}
|
| 523 |
|
| 524 |
+
.pulse-dot {
|
| 525 |
+
width: 8px;
|
| 526 |
+
height: 8px;
|
| 527 |
+
border-radius: 50%;
|
| 528 |
+
background: var(--valley);
|
| 529 |
+
animation: pulse 1.2s ease-in-out infinite;
|
| 530 |
}
|
| 531 |
|
| 532 |
+
@keyframes pulse {
|
| 533 |
+
0%, 100% { opacity: 0.2; transform: scale(0.8); }
|
| 534 |
+
50% { opacity: 1; transform: scale(1); }
|
| 535 |
+
}
|
| 536 |
+
|
| 537 |
+
/* ─── MISC ─── */
|
| 538 |
+
.episode-loading {
|
| 539 |
+
display: flex;
|
| 540 |
+
align-items: center;
|
| 541 |
+
justify-content: center;
|
| 542 |
+
gap: 8px;
|
| 543 |
+
padding: 12px 16px;
|
| 544 |
+
font-family: var(--font-display);
|
| 545 |
+
font-size: 11px;
|
| 546 |
+
color: var(--text-dim);
|
| 547 |
+
letter-spacing: 0.08em;
|
| 548 |
}
|
|
@@ -1,23 +1,218 @@
|
|
| 1 |
-
import
|
| 2 |
import './App.css';
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
|
| 4 |
function App() {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
return (
|
| 6 |
-
<div className="
|
| 7 |
-
<header className="
|
| 8 |
-
<
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
</
|
| 12 |
-
<
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
>
|
| 18 |
-
|
| 19 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
</header>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
</div>
|
| 22 |
);
|
| 23 |
}
|
|
|
|
| 1 |
+
import { useState, useEffect, useCallback, useRef } from 'react';
|
| 2 |
import './App.css';
|
| 3 |
+
import CreaseCanvas from './components/CreaseCanvas';
|
| 4 |
+
import RewardPanel from './components/RewardPanel';
|
| 5 |
+
import StepFeed from './components/StepFeed';
|
| 6 |
+
import InfoBadges from './components/InfoBadges';
|
| 7 |
+
import TargetSelector from './components/TargetSelector';
|
| 8 |
+
import PlayerControls from './components/PlayerControls';
|
| 9 |
+
import Fold3DCanvas from './components/Fold3DCanvas';
|
| 10 |
+
|
| 11 |
+
const API_BASE = 'http://localhost:8000';
|
| 12 |
|
| 13 |
function App() {
|
| 14 |
+
const [targets, setTargets] = useState({});
|
| 15 |
+
const [selectedTarget, setSelectedTarget] = useState('half_horizontal');
|
| 16 |
+
const [episode, setEpisode] = useState(null);
|
| 17 |
+
const [currentStep, setCurrentStep] = useState(0);
|
| 18 |
+
const [playing, setPlaying] = useState(false);
|
| 19 |
+
const [foldRenderMode, setFoldRenderMode] = useState('progressive'); // 'progressive' | 'final'
|
| 20 |
+
const [apiStatus, setApiStatus] = useState('connecting'); // 'connecting' | 'ok' | 'err'
|
| 21 |
+
const [episodeLoading, setEpisodeLoading] = useState(false);
|
| 22 |
+
const intervalRef = useRef(null);
|
| 23 |
+
|
| 24 |
+
const fetchTargets = useCallback(async () => {
|
| 25 |
+
try {
|
| 26 |
+
const res = await fetch(`${API_BASE}/targets`);
|
| 27 |
+
if (!res.ok) throw new Error(`HTTP ${res.status}`);
|
| 28 |
+
const data = await res.json();
|
| 29 |
+
setTargets(data);
|
| 30 |
+
setApiStatus('ok');
|
| 31 |
+
} catch {
|
| 32 |
+
setApiStatus('err');
|
| 33 |
+
}
|
| 34 |
+
}, []);
|
| 35 |
+
|
| 36 |
+
const fetchDemoEpisode = useCallback(async (targetName) => {
|
| 37 |
+
setEpisodeLoading(true);
|
| 38 |
+
setPlaying(false);
|
| 39 |
+
setCurrentStep(0);
|
| 40 |
+
try {
|
| 41 |
+
const res = await fetch(`${API_BASE}/episode/demo?target=${targetName}`);
|
| 42 |
+
if (!res.ok) throw new Error(`HTTP ${res.status}`);
|
| 43 |
+
const data = await res.json();
|
| 44 |
+
setEpisode(data);
|
| 45 |
+
setApiStatus('ok');
|
| 46 |
+
} catch {
|
| 47 |
+
setEpisode(null);
|
| 48 |
+
setApiStatus('err');
|
| 49 |
+
} finally {
|
| 50 |
+
setEpisodeLoading(false);
|
| 51 |
+
}
|
| 52 |
+
}, []);
|
| 53 |
+
|
| 54 |
+
useEffect(() => {
|
| 55 |
+
fetchTargets();
|
| 56 |
+
}, [fetchTargets]);
|
| 57 |
+
|
| 58 |
+
useEffect(() => {
|
| 59 |
+
fetchDemoEpisode(selectedTarget);
|
| 60 |
+
}, [selectedTarget, fetchDemoEpisode]);
|
| 61 |
+
|
| 62 |
+
const totalSteps = episode ? episode.steps.length : 0;
|
| 63 |
+
|
| 64 |
+
// currentStep is 1-indexed for display (0 = "empty paper before any folds")
|
| 65 |
+
// steps array is 0-indexed: steps[0] = result of fold 1
|
| 66 |
+
const activeStepData = episode && currentStep > 0 ? episode.steps[currentStep - 1] : null;
|
| 67 |
+
|
| 68 |
+
useEffect(() => {
|
| 69 |
+
if (playing) {
|
| 70 |
+
intervalRef.current = setInterval(() => {
|
| 71 |
+
setCurrentStep(prev => {
|
| 72 |
+
if (prev >= totalSteps) {
|
| 73 |
+
setPlaying(false);
|
| 74 |
+
return prev;
|
| 75 |
+
}
|
| 76 |
+
return prev + 1;
|
| 77 |
+
});
|
| 78 |
+
}, 1500);
|
| 79 |
+
}
|
| 80 |
+
return () => clearInterval(intervalRef.current);
|
| 81 |
+
}, [playing, totalSteps]);
|
| 82 |
+
|
| 83 |
+
const handlePlay = () => {
|
| 84 |
+
if (currentStep >= totalSteps) setCurrentStep(0);
|
| 85 |
+
setPlaying(true);
|
| 86 |
+
};
|
| 87 |
+
const handlePause = () => setPlaying(false);
|
| 88 |
+
const handleNext = () => {
|
| 89 |
+
setPlaying(false);
|
| 90 |
+
setCurrentStep(prev => Math.min(prev + 1, totalSteps));
|
| 91 |
+
};
|
| 92 |
+
const handlePrev = () => {
|
| 93 |
+
setPlaying(false);
|
| 94 |
+
setCurrentStep(prev => Math.max(prev - 1, 0));
|
| 95 |
+
};
|
| 96 |
+
const handleReset = () => {
|
| 97 |
+
setPlaying(false);
|
| 98 |
+
setCurrentStep(0);
|
| 99 |
+
};
|
| 100 |
+
|
| 101 |
+
const targetDef = targets[selectedTarget] || null;
|
| 102 |
+
const targetFold = episode ? episode.target : null;
|
| 103 |
+
|
| 104 |
return (
|
| 105 |
+
<div className="app">
|
| 106 |
+
<header className="app-header">
|
| 107 |
+
<span className="app-title">
|
| 108 |
+
OPTI<span className="title-accent">GAMI</span> RL
|
| 109 |
+
</span>
|
| 110 |
+
<div className="header-sep" />
|
| 111 |
+
<TargetSelector
|
| 112 |
+
targets={targets}
|
| 113 |
+
selected={selectedTarget}
|
| 114 |
+
onChange={name => setSelectedTarget(name)}
|
| 115 |
+
/>
|
| 116 |
+
<div className="header-sep" />
|
| 117 |
+
<PlayerControls
|
| 118 |
+
playing={playing}
|
| 119 |
+
onPlay={handlePlay}
|
| 120 |
+
onPause={handlePause}
|
| 121 |
+
onNext={handleNext}
|
| 122 |
+
onPrev={handlePrev}
|
| 123 |
+
onReset={handleReset}
|
| 124 |
+
currentStep={currentStep}
|
| 125 |
+
totalSteps={totalSteps}
|
| 126 |
+
disabled={!episode || episodeLoading}
|
| 127 |
+
/>
|
| 128 |
+
<div className="header-right">
|
| 129 |
+
<div className="api-status">
|
| 130 |
+
<span className={`api-status-dot ${apiStatus === 'ok' ? 'ok' : apiStatus === 'err' ? 'err' : ''}`} />
|
| 131 |
+
<span>{apiStatus === 'ok' ? 'API OK' : apiStatus === 'err' ? 'API ERR' : 'CONNECTING'}</span>
|
| 132 |
+
</div>
|
| 133 |
+
</div>
|
| 134 |
</header>
|
| 135 |
+
|
| 136 |
+
<div className="app-body">
|
| 137 |
+
<div className="app-left">
|
| 138 |
+
<div className="canvas-row">
|
| 139 |
+
<div className="canvas-wrap">
|
| 140 |
+
<span className="canvas-label">
|
| 141 |
+
TARGET — {targetDef ? targetDef.name.replace(/_/g, ' ').toUpperCase() : '—'}
|
| 142 |
+
</span>
|
| 143 |
+
<CreaseCanvas
|
| 144 |
+
paperState={null}
|
| 145 |
+
target={targetFold}
|
| 146 |
+
label="TARGET"
|
| 147 |
+
dim={280}
|
| 148 |
+
ghostOnly={true}
|
| 149 |
+
/>
|
| 150 |
+
</div>
|
| 151 |
+
<div className="canvas-wrap">
|
| 152 |
+
<span className="canvas-label">
|
| 153 |
+
{currentStep === 0 ? 'INITIAL STATE' : `STEP ${currentStep} / ${totalSteps}`}
|
| 154 |
+
</span>
|
| 155 |
+
<CreaseCanvas
|
| 156 |
+
paperState={activeStepData ? activeStepData.paper_state : null}
|
| 157 |
+
target={targetFold}
|
| 158 |
+
label={currentStep === 0 ? 'INITIAL' : `STEP ${currentStep}`}
|
| 159 |
+
dim={280}
|
| 160 |
+
ghostOnly={false}
|
| 161 |
+
/>
|
| 162 |
+
</div>
|
| 163 |
+
<div className="canvas-wrap">
|
| 164 |
+
<div className="canvas-label-row">
|
| 165 |
+
<span className="canvas-label">3D FOLD PREVIEW</span>
|
| 166 |
+
<div className="fold-mode-toggle">
|
| 167 |
+
<button
|
| 168 |
+
className={`fold-mode-btn${foldRenderMode === 'progressive' ? ' active' : ''}`}
|
| 169 |
+
onClick={() => setFoldRenderMode('progressive')}
|
| 170 |
+
type="button"
|
| 171 |
+
>
|
| 172 |
+
PER CREASE
|
| 173 |
+
</button>
|
| 174 |
+
<button
|
| 175 |
+
className={`fold-mode-btn${foldRenderMode === 'final' ? ' active' : ''}`}
|
| 176 |
+
onClick={() => setFoldRenderMode('final')}
|
| 177 |
+
type="button"
|
| 178 |
+
>
|
| 179 |
+
FOLD AT END
|
| 180 |
+
</button>
|
| 181 |
+
</div>
|
| 182 |
+
</div>
|
| 183 |
+
<Fold3DCanvas
|
| 184 |
+
steps={episode ? episode.steps : []}
|
| 185 |
+
currentStep={currentStep}
|
| 186 |
+
totalSteps={totalSteps}
|
| 187 |
+
mode={foldRenderMode}
|
| 188 |
+
dim={280}
|
| 189 |
+
/>
|
| 190 |
+
</div>
|
| 191 |
+
</div>
|
| 192 |
+
|
| 193 |
+
<div className="step-feed-section">
|
| 194 |
+
<div className="section-header">FOLD SEQUENCE</div>
|
| 195 |
+
{episodeLoading ? (
|
| 196 |
+
<div className="episode-loading">
|
| 197 |
+
<div className="pulse-dot" />
|
| 198 |
+
FETCHING EPISODE...
|
| 199 |
+
</div>
|
| 200 |
+
) : (
|
| 201 |
+
<StepFeed
|
| 202 |
+
steps={episode ? episode.steps : []}
|
| 203 |
+
currentStep={currentStep}
|
| 204 |
+
/>
|
| 205 |
+
)}
|
| 206 |
+
</div>
|
| 207 |
+
</div>
|
| 208 |
+
|
| 209 |
+
<div className="app-right">
|
| 210 |
+
<div className="section-header">REWARD DECOMPOSITION</div>
|
| 211 |
+
<RewardPanel reward={activeStepData ? activeStepData.reward : null} />
|
| 212 |
+
<div className="section-header">EPISODE INFO</div>
|
| 213 |
+
<InfoBadges info={activeStepData ? activeStepData.info : null} targetDef={targetDef} />
|
| 214 |
+
</div>
|
| 215 |
+
</div>
|
| 216 |
</div>
|
| 217 |
);
|
| 218 |
}
|
|
@@ -1,8 +1 @@
|
|
| 1 |
-
|
| 2 |
-
import App from './App';
|
| 3 |
-
|
| 4 |
-
test('renders learn react link', () => {
|
| 5 |
-
render(<App />);
|
| 6 |
-
const linkElement = screen.getByText(/learn react/i);
|
| 7 |
-
expect(linkElement).toBeInTheDocument();
|
| 8 |
-
});
|
|
|
|
| 1 |
+
// Tests removed — observability dashboard
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
const MOUNTAIN = '#f59e0b';
|
| 2 |
+
const VALLEY = '#38bdf8';
|
| 3 |
+
|
| 4 |
+
function toSvg(x, y, dim) {
|
| 5 |
+
return [x * dim, (1 - y) * dim];
|
| 6 |
+
}
|
| 7 |
+
|
| 8 |
+
function GhostEdges({ target, dim }) {
|
| 9 |
+
if (!target) return null;
|
| 10 |
+
const { vertices_coords, edges_vertices, edges_assignment } = target;
|
| 11 |
+
if (!vertices_coords || !edges_vertices || !edges_assignment) return null;
|
| 12 |
+
|
| 13 |
+
return edges_vertices.map((ev, i) => {
|
| 14 |
+
const asgn = edges_assignment[i];
|
| 15 |
+
if (asgn === 'B') return null;
|
| 16 |
+
const [v1x, v1y] = vertices_coords[ev[0]];
|
| 17 |
+
const [v2x, v2y] = vertices_coords[ev[1]];
|
| 18 |
+
const [x1, y1] = toSvg(v1x, v1y, dim);
|
| 19 |
+
const [x2, y2] = toSvg(v2x, v2y, dim);
|
| 20 |
+
const color = asgn === 'M' ? MOUNTAIN : VALLEY;
|
| 21 |
+
return (
|
| 22 |
+
<line
|
| 23 |
+
key={i}
|
| 24 |
+
x1={x1} y1={y1} x2={x2} y2={y2}
|
| 25 |
+
stroke={color}
|
| 26 |
+
strokeOpacity={0.25}
|
| 27 |
+
strokeWidth={1.5}
|
| 28 |
+
strokeDasharray="5 4"
|
| 29 |
+
/>
|
| 30 |
+
);
|
| 31 |
+
});
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
function CurrentEdges({ paperState, dim }) {
|
| 35 |
+
if (!paperState || !paperState.edges) return null;
|
| 36 |
+
return paperState.edges.map((edge) => {
|
| 37 |
+
if (edge.assignment === 'B') return null;
|
| 38 |
+
const [x1, y1] = toSvg(edge.v1[0], edge.v1[1], dim);
|
| 39 |
+
const [x2, y2] = toSvg(edge.v2[0], edge.v2[1], dim);
|
| 40 |
+
const color = edge.assignment === 'M' ? MOUNTAIN : VALLEY;
|
| 41 |
+
return (
|
| 42 |
+
<line
|
| 43 |
+
key={edge.id}
|
| 44 |
+
x1={x1} y1={y1} x2={x2} y2={y2}
|
| 45 |
+
stroke={color}
|
| 46 |
+
strokeWidth={2.5}
|
| 47 |
+
strokeLinecap="square"
|
| 48 |
+
/>
|
| 49 |
+
);
|
| 50 |
+
});
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
function AnchorCrosses({ paperState, dim }) {
|
| 54 |
+
if (!paperState || !paperState.anchor_points) return null;
|
| 55 |
+
const size = 4;
|
| 56 |
+
return paperState.anchor_points.map((pt, i) => {
|
| 57 |
+
const [cx, cy] = toSvg(pt[0], pt[1], dim);
|
| 58 |
+
return (
|
| 59 |
+
<g key={i}>
|
| 60 |
+
<line
|
| 61 |
+
x1={cx - size} y1={cy} x2={cx + size} y2={cy}
|
| 62 |
+
stroke="#64748b" strokeWidth={1}
|
| 63 |
+
/>
|
| 64 |
+
<line
|
| 65 |
+
x1={cx} y1={cy - size} x2={cx} y2={cy + size}
|
| 66 |
+
stroke="#64748b" strokeWidth={1}
|
| 67 |
+
/>
|
| 68 |
+
</g>
|
| 69 |
+
);
|
| 70 |
+
});
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
export default function CreaseCanvas({ paperState, target, dim = 280, ghostOnly = false }) {
|
| 74 |
+
const pad = 1;
|
| 75 |
+
const size = dim;
|
| 76 |
+
|
| 77 |
+
return (
|
| 78 |
+
<svg
|
| 79 |
+
className="canvas-svg"
|
| 80 |
+
width={size}
|
| 81 |
+
height={size}
|
| 82 |
+
viewBox={`0 0 ${size} ${size}`}
|
| 83 |
+
style={{ flexShrink: 0 }}
|
| 84 |
+
>
|
| 85 |
+
{/* Paper background */}
|
| 86 |
+
<rect
|
| 87 |
+
x={pad} y={pad}
|
| 88 |
+
width={size - pad * 2} height={size - pad * 2}
|
| 89 |
+
fill="#fafaf5"
|
| 90 |
+
/>
|
| 91 |
+
|
| 92 |
+
{/* Ghost target overlay */}
|
| 93 |
+
<GhostEdges target={target} dim={size} />
|
| 94 |
+
|
| 95 |
+
{/* Current paper state */}
|
| 96 |
+
{!ghostOnly && (
|
| 97 |
+
<>
|
| 98 |
+
<CurrentEdges paperState={paperState} dim={size} />
|
| 99 |
+
<AnchorCrosses paperState={paperState} dim={size} />
|
| 100 |
+
</>
|
| 101 |
+
)}
|
| 102 |
+
|
| 103 |
+
{/* Paper border */}
|
| 104 |
+
<rect
|
| 105 |
+
x={pad} y={pad}
|
| 106 |
+
width={size - pad * 2} height={size - pad * 2}
|
| 107 |
+
fill="none"
|
| 108 |
+
stroke="#2a2a3a"
|
| 109 |
+
strokeWidth={1}
|
| 110 |
+
/>
|
| 111 |
+
</svg>
|
| 112 |
+
);
|
| 113 |
+
}
|
|
@@ -0,0 +1,327 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { useCallback, useEffect, useMemo, useRef } from 'react';
|
| 2 |
+
|
| 3 |
+
const PAPER_RGB = [250, 250, 245];
|
| 4 |
+
const LIGHT_DIR = normalize3([0.4, -0.45, 1.0]);
|
| 5 |
+
const MAX_FOLD_RAD = Math.PI * 0.92;
|
| 6 |
+
const SIDE_EPS = 1e-7;
|
| 7 |
+
const MOUNTAIN_COLOR = 'rgba(245, 158, 11, 0.95)';
|
| 8 |
+
const VALLEY_COLOR = 'rgba(56, 189, 248, 0.95)';
|
| 9 |
+
|
| 10 |
+
function clamp(value, min, max) {
|
| 11 |
+
return Math.min(Math.max(value, min), max);
|
| 12 |
+
}
|
| 13 |
+
|
| 14 |
+
function normalize3(v) {
|
| 15 |
+
const mag = Math.hypot(v[0], v[1], v[2]);
|
| 16 |
+
if (mag < 1e-12) return [0, 0, 0];
|
| 17 |
+
return [v[0] / mag, v[1] / mag, v[2] / mag];
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
function cross3(a, b) {
|
| 21 |
+
return [
|
| 22 |
+
a[1] * b[2] - a[2] * b[1],
|
| 23 |
+
a[2] * b[0] - a[0] * b[2],
|
| 24 |
+
a[0] * b[1] - a[1] * b[0],
|
| 25 |
+
];
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
function sub3(a, b) {
|
| 29 |
+
return [a[0] - b[0], a[1] - b[1], a[2] - b[2]];
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
function dot3(a, b) {
|
| 33 |
+
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
function shadePaper(intensity) {
|
| 37 |
+
const lit = clamp(0.3 + 0.7 * Math.abs(intensity), 0.0, 1.0);
|
| 38 |
+
const r = Math.round(PAPER_RGB[0] * lit);
|
| 39 |
+
const g = Math.round(PAPER_RGB[1] * lit);
|
| 40 |
+
const b = Math.round(PAPER_RGB[2] * lit);
|
| 41 |
+
return `rgb(${r}, ${g}, ${b})`;
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
function buildGridMesh(resolution = 18) {
|
| 45 |
+
const vertices = [];
|
| 46 |
+
for (let y = 0; y <= resolution; y += 1) {
|
| 47 |
+
for (let x = 0; x <= resolution; x += 1) {
|
| 48 |
+
vertices.push([x / resolution, y / resolution, 0]);
|
| 49 |
+
}
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
const triangles = [];
|
| 53 |
+
const stride = resolution + 1;
|
| 54 |
+
for (let y = 0; y < resolution; y += 1) {
|
| 55 |
+
for (let x = 0; x < resolution; x += 1) {
|
| 56 |
+
const a = y * stride + x;
|
| 57 |
+
const b = a + 1;
|
| 58 |
+
const c = a + stride;
|
| 59 |
+
const d = c + 1;
|
| 60 |
+
triangles.push([a, b, d]);
|
| 61 |
+
triangles.push([a, d, c]);
|
| 62 |
+
}
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
return { vertices, triangles, resolution };
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
function rotateAroundAxis(point, axisPoint, axisDir, angleRad) {
|
| 69 |
+
const px = point[0] - axisPoint[0];
|
| 70 |
+
const py = point[1] - axisPoint[1];
|
| 71 |
+
const pz = point[2] - axisPoint[2];
|
| 72 |
+
|
| 73 |
+
const kx = axisDir[0];
|
| 74 |
+
const ky = axisDir[1];
|
| 75 |
+
const kz = axisDir[2];
|
| 76 |
+
|
| 77 |
+
const cosA = Math.cos(angleRad);
|
| 78 |
+
const sinA = Math.sin(angleRad);
|
| 79 |
+
|
| 80 |
+
const crossX = ky * pz - kz * py;
|
| 81 |
+
const crossY = kz * px - kx * pz;
|
| 82 |
+
const crossZ = kx * py - ky * px;
|
| 83 |
+
|
| 84 |
+
const dot = px * kx + py * ky + pz * kz;
|
| 85 |
+
const oneMinus = 1.0 - cosA;
|
| 86 |
+
|
| 87 |
+
return [
|
| 88 |
+
axisPoint[0] + px * cosA + crossX * sinA + kx * dot * oneMinus,
|
| 89 |
+
axisPoint[1] + py * cosA + crossY * sinA + ky * dot * oneMinus,
|
| 90 |
+
axisPoint[2] + pz * cosA + crossZ * sinA + kz * dot * oneMinus,
|
| 91 |
+
];
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
// Fold one side of the sheet about the crease line fold.from → fold.to.
// The side with fewer vertices is treated as the flap and rotates;
// `progress` in [0, 1] scales the fold angle up to MAX_FOLD_RAD.
// Mutates `vertices` in place.
function applyFoldToVertices(vertices, fold, progress) {
  if (!fold || progress <= 0) return;
  const [x1, y1] = fold.from;
  const [x2, y2] = fold.to;
  const dx = x2 - x1;
  const dy = y2 - y1;
  const len = Math.hypot(dx, dy);
  if (len < 1e-8) return; // degenerate crease: endpoints coincide

  // Classify each vertex by the signed cross-product test: which side
  // of the (infinite) crease line it lies on, within SIDE_EPS tolerance.
  const sideValues = [];
  let posCount = 0;
  let negCount = 0;

  for (let i = 0; i < vertices.length; i += 1) {
    const v = vertices[i];
    const side = dx * (v[1] - y1) - dy * (v[0] - x1);
    sideValues.push(side);
    if (side > SIDE_EPS) posCount += 1;
    else if (side < -SIDE_EPS) negCount += 1;
  }

  // Rotate the smaller side. The two overrides below are NOT redundant:
  // they flip the decision when one side is empty, so the non-empty side
  // always rotates (posCount === 0 makes the `<=` test pick "positive",
  // which would rotate nothing).
  let rotatePositive = posCount <= negCount;
  if (posCount === 0 && negCount > 0) rotatePositive = false;
  if (negCount === 0 && posCount > 0) rotatePositive = true;
  if (posCount === 0 && negCount === 0) return; // everything lies on the crease

  // 'V' (valley) rotates one way, 'M' (mountain) the other.
  const sign = fold.assignment === 'V' ? 1 : -1;
  const angle = sign * MAX_FOLD_RAD * progress;
  const axisPoint = [x1, y1, 0];
  const axisDir = [dx / len, dy / len, 0];

  for (let i = 0; i < vertices.length; i += 1) {
    const side = sideValues[i];
    const shouldRotate = rotatePositive ? side > SIDE_EPS : side < -SIDE_EPS;
    if (!shouldRotate) continue;
    vertices[i] = rotateAroundAxis(vertices[i], axisPoint, axisDir, angle);
  }
}
|
| 132 |
+
|
| 133 |
+
// Project a 3D vertex (unit-square paper coordinates, z up) to 2D canvas
// pixels: center the square, apply fixed pitch then yaw camera rotations,
// then a simple perspective divide. `z` is returned for depth sorting.
function projectVertex(vertex, dim) {
  let x = vertex[0] - 0.5;
  let y = vertex[1] - 0.5;
  let z = vertex[2];

  // Fixed camera orientation, in radians.
  const pitch = 1.04;
  const yaw = -0.78;

  // Rotate about the x-axis (pitch).
  const cp = Math.cos(pitch);
  const sp = Math.sin(pitch);
  const y1 = y * cp - z * sp;
  const z1 = y * sp + z * cp;

  // Rotate about the y-axis (yaw).
  const cy = Math.cos(yaw);
  const sy = Math.sin(yaw);
  const x2 = x * cy + z1 * sy;
  const z2 = -x * sy + z1 * cy;

  // Perspective factor grows as the point approaches the camera plane.
  const camDist = 2.8;
  const perspective = camDist / (camDist - z2);

  return {
    // 0.82 scales the square into the canvas; 0.52 nudges the view down.
    x: dim * 0.5 + x2 * perspective * dim * 0.82,
    y: dim * 0.52 - y1 * perspective * dim * 0.82,
    z: z2,
  };
}
|
| 160 |
+
|
| 161 |
+
function foldProgresses(stepValue, foldCount, mode, totalSteps) {
|
| 162 |
+
const values = new Array(foldCount).fill(0);
|
| 163 |
+
if (foldCount === 0) return values;
|
| 164 |
+
|
| 165 |
+
if (mode === 'final') {
|
| 166 |
+
const startCollapse = Math.max(totalSteps - 1, 0);
|
| 167 |
+
const collapse = clamp(stepValue - startCollapse, 0, 1);
|
| 168 |
+
for (let i = 0; i < foldCount; i += 1) values[i] = collapse;
|
| 169 |
+
return values;
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
for (let i = 0; i < foldCount; i += 1) {
|
| 173 |
+
if (stepValue >= i + 1) values[i] = 1;
|
| 174 |
+
else if (stepValue > i) values[i] = clamp(stepValue - i, 0, 1);
|
| 175 |
+
}
|
| 176 |
+
return values;
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
function stepEasing(t) {
|
| 180 |
+
return t < 0.5 ? 4 * t * t * t : 1 - ((-2 * t + 2) ** 3) / 2;
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
/**
 * 3D preview of a fold sequence, rendered onto a 2D canvas with a tiny
 * software pipeline: fold the grid mesh, project, painter's-algorithm
 * sort, flat-shade. Step changes animate over ~420 ms via rAF.
 *
 * Props:
 *   steps       - array of step objects; each may carry a `fold` with
 *                 from_point / to_point / assignment.
 *   currentStep - fractional step position to display.
 *   totalSteps  - total step count (used by 'final' mode timing).
 *   mode        - 'progressive' (default) or 'final' collapse.
 *   dim         - square canvas size in pixels.
 */
export default function Fold3DCanvas({
  steps,
  currentStep,
  totalSteps,
  mode = 'progressive',
  dim = 280,
}) {
  const canvasRef = useRef(null);
  const rafRef = useRef(null);
  // Fractional step currently on screen; persists across renders so a new
  // animation starts from wherever the previous one left off.
  const animatedStepRef = useRef(currentStep);

  // Normalize incoming step data to plain {from, to, assignment} folds.
  const folds = useMemo(
    () => (steps || [])
      .map((s) => s.fold)
      .filter(Boolean)
      .map((fold) => ({
        from: [Number(fold.from_point[0]), Number(fold.from_point[1])],
        to: [Number(fold.to_point[0]), Number(fold.to_point[1])],
        assignment: fold.assignment === 'M' ? 'M' : 'V',
      })),
    [steps],
  );

  const mesh = useMemo(() => buildGridMesh(18), []);

  // Render one frame at fractional step `stepValue`.
  const draw = useCallback((stepValue) => {
    const canvas = canvasRef.current;
    if (!canvas) return;
    const ctx = canvas.getContext('2d');
    if (!ctx) return;

    ctx.clearRect(0, 0, dim, dim);
    ctx.fillStyle = '#121220';
    ctx.fillRect(0, 0, dim, dim);

    // Copy the flat mesh, then apply each active fold in order.
    const vertices = mesh.vertices.map((v) => [v[0], v[1], v[2]]);
    const progress = foldProgresses(stepValue, folds.length, mode, totalSteps);

    for (let i = 0; i < folds.length; i += 1) {
      if (progress[i] <= 0) continue;
      applyFoldToVertices(vertices, folds[i], progress[i]);
    }

    const projected = vertices.map((v) => projectVertex(v, dim));

    // Per-triangle depth and flat shade from the (world-space) normal.
    const tris = mesh.triangles.map((tri) => {
      const p0 = projected[tri[0]];
      const p1 = projected[tri[1]];
      const p2 = projected[tri[2]];
      const avgZ = (p0.z + p1.z + p2.z) / 3;

      const v0 = vertices[tri[0]];
      const v1 = vertices[tri[1]];
      const v2 = vertices[tri[2]];
      const normal = normalize3(cross3(sub3(v1, v0), sub3(v2, v0)));
      const intensity = dot3(normal, LIGHT_DIR);

      return {
        tri,
        avgZ,
        shade: shadePaper(intensity),
      };
    });

    // Painter's algorithm: draw back-to-front.
    tris.sort((a, b) => a.avgZ - b.avgZ);

    for (const triInfo of tris) {
      const [a, b, c] = triInfo.tri;
      const p0 = projected[a];
      const p1 = projected[b];
      const p2 = projected[c];

      ctx.beginPath();
      ctx.moveTo(p0.x, p0.y);
      ctx.lineTo(p1.x, p1.y);
      ctx.lineTo(p2.x, p2.y);
      ctx.closePath();
      ctx.fillStyle = triInfo.shade;
      ctx.fill();
      ctx.strokeStyle = 'rgba(42, 42, 58, 0.22)';
      ctx.lineWidth = 0.55;
      ctx.stroke();
    }

    // Overlay crease lines by snapping fold endpoints to the nearest
    // grid vertex and connecting their projections.
    const res = mesh.resolution;
    const stride = res + 1;
    const pointToIndex = (pt) => {
      const ix = clamp(Math.round(pt[0] * res), 0, res);
      const iy = clamp(Math.round(pt[1] * res), 0, res);
      return iy * stride + ix;
    };

    for (let i = 0; i < folds.length; i += 1) {
      if (progress[i] <= 0.02) continue; // skip lines that have barely started
      const fold = folds[i];
      const aIdx = pointToIndex(fold.from);
      const bIdx = pointToIndex(fold.to);
      const pa = projected[aIdx];
      const pb = projected[bIdx];

      ctx.beginPath();
      ctx.moveTo(pa.x, pa.y);
      ctx.lineTo(pb.x, pb.y);
      ctx.strokeStyle = fold.assignment === 'M' ? MOUNTAIN_COLOR : VALLEY_COLOR;
      // Fade the crease line in as the fold progresses.
      ctx.globalAlpha = clamp(0.35 + 0.65 * progress[i], 0, 1);
      ctx.lineWidth = 2.15;
      ctx.stroke();
      ctx.globalAlpha = 1;
    }
  }, [dim, folds, mesh, mode, totalSteps]);

  // Redraw at the current animated position whenever `draw` changes
  // (e.g. new steps or a resize), without restarting the animation.
  useEffect(() => {
    draw(animatedStepRef.current);
  }, [draw]);

  // Animate from the on-screen step toward `currentStep` with an eased
  // tween; cancel any in-flight animation on change or unmount.
  useEffect(() => {
    cancelAnimationFrame(rafRef.current);
    const startValue = animatedStepRef.current;
    const endValue = currentStep;
    const durationMs = 420;
    const startAt = performance.now();

    const tick = (now) => {
      const t = clamp((now - startAt) / durationMs, 0, 1);
      const eased = stepEasing(t);
      const value = startValue + (endValue - startValue) * eased;
      animatedStepRef.current = value;
      draw(value);
      if (t < 1) rafRef.current = requestAnimationFrame(tick);
    };

    rafRef.current = requestAnimationFrame(tick);
    return () => cancelAnimationFrame(rafRef.current);
  }, [currentStep, draw]);

  return (
    <canvas
      ref={canvasRef}
      width={dim}
      height={dim}
      className="canvas-3d"
      aria-label="3D fold preview"
    />
  );
}
|
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
function BoolVal({ value }) {
|
| 2 |
+
if (value === null || value === undefined) {
|
| 3 |
+
return <span className="info-val dim">—</span>;
|
| 4 |
+
}
|
| 5 |
+
return (
|
| 6 |
+
<span className={`info-val ${value ? 'bool-true' : 'bool-false'}`}>
|
| 7 |
+
{value ? 'TRUE' : 'FALSE'}
|
| 8 |
+
</span>
|
| 9 |
+
);
|
| 10 |
+
}
|
| 11 |
+
|
| 12 |
+
function TextVal({ value, dim = false }) {
|
| 13 |
+
if (value === null || value === undefined) {
|
| 14 |
+
return <span className="info-val dim">—</span>;
|
| 15 |
+
}
|
| 16 |
+
return (
|
| 17 |
+
<span className={`info-val${dim ? ' dim' : ''}`}>
|
| 18 |
+
{String(value).toUpperCase()}
|
| 19 |
+
</span>
|
| 20 |
+
);
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
function NumVal({ value }) {
|
| 24 |
+
if (value === null || value === undefined) {
|
| 25 |
+
return <span className="info-val dim">—</span>;
|
| 26 |
+
}
|
| 27 |
+
return <span className="info-val">{value}</span>;
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
/**
 * Key/value badge list for the current environment info snapshot.
 * `info` is the per-step info dict from the env (may be null before the
 * first step); `targetDef` is the selected target's definition and, when
 * present, also supplies n_creases as a fallback plus level/name rows.
 */
export default function InfoBadges({ info, targetDef }) {
  return (
    <div className="info-badges">
      <div className="info-row">
        <span className="info-key">n_creases</span>
        {/* Fall back to the target's crease count before any step runs. */}
        <NumVal value={info ? info.n_creases : (targetDef ? targetDef.n_creases : null)} />
      </div>
      <div className="info-row">
        <span className="info-key">interior_verts</span>
        <NumVal value={info ? info.n_interior_vertices : null} />
      </div>
      <div className="info-row">
        <span className="info-key">local_fold</span>
        <BoolVal value={info ? info.local_foldability : null} />
      </div>
      <div className="info-row">
        <span className="info-key">blb_sat</span>
        <BoolVal value={info ? info.blb_satisfied : null} />
      </div>
      <div className="info-row">
        <span className="info-key">global_fold</span>
        <TextVal
          value={info ? info.global_foldability : null}
          dim={true}
        />
      </div>
      {targetDef && (
        <>
          <div className="info-row">
            <span className="info-key">level</span>
            <span className="info-val">LVL {targetDef.level}</span>
          </div>
          <div className="info-row">
            <span className="info-key">target</span>
            <span className="info-val" style={{ fontSize: '10px', textAlign: 'right', maxWidth: '140px', wordBreak: 'break-word' }}>
              {targetDef.name.replace(/_/g, ' ').toUpperCase()}
            </span>
          </div>
        </>
      )}
    </div>
  );
}
|
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
 * Transport bar for stepping through an episode: reset / prev / step
 * display / next / play-pause. All handlers are supplied by the parent;
 * `disabled` grays out everything (e.g. while no episode is loaded).
 */
export default function PlayerControls({
  playing,
  onPlay,
  onPause,
  onNext,
  onPrev,
  onReset,
  currentStep,
  totalSteps,
  disabled,
}) {
  const atStart = currentStep === 0;
  const atEnd = currentStep >= totalSteps;

  return (
    <div className="player-controls">
      <button
        className="ctrl-btn"
        onClick={onReset}
        disabled={disabled || atStart}
        title="Reset to start"
      >
        ⏮ RST
      </button>
      <button
        className="ctrl-btn"
        onClick={onPrev}
        disabled={disabled || atStart}
        title="Previous step"
      >
        ◀ PREV
      </button>
      <span className="ctrl-step-display">
        {disabled ? '—/—' : `${currentStep} / ${totalSteps}`}
      </span>
      <button
        className="ctrl-btn"
        onClick={onNext}
        disabled={disabled || atEnd}
        title="Next step"
      >
        NEXT ▶
      </button>
      {/* Play doubles as pause; it stays enabled while playing so the
          final frame can still be paused. */}
      <button
        className={`ctrl-btn play`}
        onClick={playing ? onPause : onPlay}
        disabled={disabled || (!playing && atEnd)}
        title={playing ? 'Pause' : 'Play'}
      >
        {playing ? '⏸ PAUSE' : '▶▶ PLAY'}
      </button>
    </div>
  );
}
|
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Reward components shown as individual bars; `key` indexes into the
// reward dict, `color` is a CSS custom property defined in App.css.
const REWARD_FIELDS = [
  { key: 'kawasaki', label: 'kawasaki', color: 'var(--validity)' },
  { key: 'maekawa', label: 'maekawa', color: 'var(--validity)' },
  { key: 'blb', label: 'blb', color: 'var(--validity)' },
  { key: 'progress', label: 'progress', color: 'var(--progress)' },
  { key: 'economy', label: 'economy', color: 'var(--economy)' },
];

// The summary row drawn below the divider.
const TOTAL_FIELD = { key: 'total', label: 'total', color: 'var(--text-primary)' };

/**
 * One labelled bar. `value` is expected in [0, 1] for the bar width
 * (clamped); the numeric readout shows the raw value, or an em-dash
 * when the value is null/undefined.
 */
function RewardRow({ label, color, value }) {
  const isDash = value === null || value === undefined;
  // Clamp into [0, 1] then convert to a CSS percentage width.
  const pct = isDash ? 0 : Math.min(Math.max(value, 0), 1) * 100;

  return (
    <div className="reward-row">
      <span className="reward-label">{label}</span>
      <div className="reward-track">
        <div
          className="reward-bar"
          style={{ width: `${pct}%`, background: color }}
        />
      </div>
      <span className={`reward-value${isDash ? ' dim' : ''}`}>
        {isDash ? '—' : value.toFixed(2)}
      </span>
    </div>
  );
}
|
| 30 |
+
|
| 31 |
+
/**
 * Panel of reward-component bars plus a separated total row.
 * `reward` is the current step's reward dict (may be null before any
 * step has produced one — every row then renders as an em-dash).
 */
export default function RewardPanel({ reward }) {
  return (
    <div className="reward-panel">
      {REWARD_FIELDS.map(({ key, label, color }) => (
        <RewardRow
          key={key}
          label={label}
          color={color}
          value={reward ? reward[key] : null}
        />
      ))}
      <div className="reward-divider" />
      <RewardRow
        label={TOTAL_FIELD.label}
        color={TOTAL_FIELD.color}
        value={reward ? reward[TOTAL_FIELD.key] : null}
      />
    </div>
  );
}
|
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { useEffect, useRef } from 'react';
|
| 2 |
+
|
| 3 |
+
function rewardDelta(step, prevStep) {
|
| 4 |
+
if (!step || !step.reward) return null;
|
| 5 |
+
const curr = step.reward.total;
|
| 6 |
+
if (prevStep && prevStep.reward) {
|
| 7 |
+
return curr - prevStep.reward.total;
|
| 8 |
+
}
|
| 9 |
+
return curr;
|
| 10 |
+
}
|
| 11 |
+
|
| 12 |
+
/**
 * Scrolling log of executed steps. The entry matching `currentStep`
 * (1-based) is highlighted and auto-scrolled into view; each entry shows
 * the fold instruction, its M/V assignment badge, and the reward delta
 * relative to the previous step.
 */
export default function StepFeed({ steps, currentStep }) {
  const feedRef = useRef(null);
  const activeRef = useRef(null);

  // Keep the active entry visible whenever the step changes.
  useEffect(() => {
    if (activeRef.current) {
      activeRef.current.scrollIntoView({ block: 'nearest', behavior: 'smooth' });
    }
  }, [currentStep]);

  if (!steps || steps.length === 0) {
    return (
      <div className="step-feed">
        <div style={{ padding: '16px', color: 'var(--text-dim)', fontFamily: 'var(--font-display)', fontSize: '11px' }}>
          NO STEPS LOADED
        </div>
      </div>
    );
  }

  return (
    <div className="step-feed" ref={feedRef}>
      {steps.map((step, idx) => {
        const stepNum = idx + 1; // steps are displayed 1-based
        const isActive = currentStep === stepNum;
        const delta = rewardDelta(step, idx > 0 ? steps[idx - 1] : null);
        const asgn = step.fold ? step.fold.assignment : null;
        // Fall back to the raw prompt for steps that produced no fold.
        const instruction = step.fold ? step.fold.instruction : (step.prompt || '');

        return (
          <div
            key={stepNum}
            className={`step-entry${isActive ? ' active' : ''}`}
            ref={isActive ? activeRef : null}
          >
            <div className="step-entry-top">
              <span className="step-num">#{stepNum}</span>
              <span className="step-instruction">{instruction}</span>
              {asgn && (
                <span className={`assign-badge ${asgn}`}>{asgn}</span>
              )}
            </div>
            {delta !== null && (
              <div className="step-reward-delta">
                {'\u0394'} total:{' '}
                <span className={delta >= 0 ? 'delta-positive' : 'delta-negative'}>
                  {delta >= 0 ? '+' : ''}{delta.toFixed(3)}
                </span>
                {step.reward && (
                  <span style={{ color: 'var(--text-dim)' }}>
                    {' '}| progress: {step.reward.progress.toFixed(2)}
                    {' '}| economy: {step.reward.economy.toFixed(2)}
                  </span>
                )}
              </div>
            )}
          </div>
        );
      })}
    </div>
  );
}
|
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
function groupByLevel(targets) {
|
| 2 |
+
const levels = {};
|
| 3 |
+
Object.values(targets).forEach(t => {
|
| 4 |
+
if (!levels[t.level]) levels[t.level] = [];
|
| 5 |
+
levels[t.level].push(t);
|
| 6 |
+
});
|
| 7 |
+
return levels;
|
| 8 |
+
}
|
| 9 |
+
|
| 10 |
+
/**
 * Dropdown for choosing a fold target, grouped by difficulty level.
 * `targets` maps target name → definition; `selected` is the currently
 * chosen name and `onChange` receives the new name on selection.
 */
export default function TargetSelector({ targets, selected, onChange }) {
  const levels = groupByLevel(targets);
  // Object keys are strings, so sort numerically for level order.
  const sortedLevels = Object.keys(levels).sort((a, b) => Number(a) - Number(b));

  return (
    <div className="target-selector">
      <span className="target-selector-label">TARGET</span>
      <select
        className="target-select"
        value={selected}
        onChange={e => onChange(e.target.value)}
      >
        {sortedLevels.length === 0 ? (
          <option value="">LOADING...</option>
        ) : (
          sortedLevels.map(level => (
            <optgroup key={level} label={`── LEVEL ${level}`}>
              {levels[level].map(t => (
                <option key={t.name} value={t.name}>
                  {t.name.replace(/_/g, ' ').toUpperCase()}
                </option>
              ))}
            </optgroup>
          ))
        )}
      </select>
    </div>
  );
}
|
|
@@ -1,13 +1,34 @@
|
|
| 1 |
-
|
|
|
|
|
|
|
|
|
|
| 2 |
margin: 0;
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
-webkit-font-smoothing: antialiased;
|
| 7 |
-
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
}
|
| 9 |
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
monospace;
|
| 13 |
}
|
|
|
|
| 1 |
+
@import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@300;400;500;700&family=IBM+Plex+Mono:wght@300;400;500&display=swap');
|
| 2 |
+
|
| 3 |
+
*, *::before, *::after {
|
| 4 |
+
box-sizing: border-box;
|
| 5 |
margin: 0;
|
| 6 |
+
padding: 0;
|
| 7 |
+
}
|
| 8 |
+
|
| 9 |
+
body {
|
| 10 |
+
background: #0d0d14;
|
| 11 |
+
color: #f8fafc;
|
| 12 |
+
font-family: 'IBM Plex Mono', monospace;
|
| 13 |
+
font-size: 13px;
|
| 14 |
+
line-height: 1.5;
|
| 15 |
-webkit-font-smoothing: antialiased;
|
| 16 |
+
overflow-x: hidden;
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
::-webkit-scrollbar {
|
| 20 |
+
width: 4px;
|
| 21 |
+
height: 4px;
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
::-webkit-scrollbar-track {
|
| 25 |
+
background: #0d0d14;
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
::-webkit-scrollbar-thumb {
|
| 29 |
+
background: #2a2a3a;
|
| 30 |
}
|
| 31 |
|
| 32 |
+
::-webkit-scrollbar-thumb:hover {
|
| 33 |
+
background: #3a3a5a;
|
|
|
|
| 34 |
}
|
|
@@ -1,13 +1 @@
|
|
| 1 |
-
|
| 2 |
-
if (onPerfEntry && onPerfEntry instanceof Function) {
|
| 3 |
-
import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => {
|
| 4 |
-
getCLS(onPerfEntry);
|
| 5 |
-
getFID(onPerfEntry);
|
| 6 |
-
getFCP(onPerfEntry);
|
| 7 |
-
getLCP(onPerfEntry);
|
| 8 |
-
getTTFB(onPerfEntry);
|
| 9 |
-
});
|
| 10 |
-
}
|
| 11 |
-
};
|
| 12 |
-
|
| 13 |
-
export default reportWebVitals;
|
|
|
|
| 1 |
+
// Web-vitals reporting is intentionally disabled; the no-op export is kept
// so existing `import reportWebVitals from './reportWebVitals'` calls in
// index.js continue to resolve without the web-vitals dependency.
export default function reportWebVitals() {}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
File without changes
|
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
from env.graph import CreaseGraph, VERTEX_TOL
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def test_init_boundary():
    """A fresh CreaseGraph is the unit square: 4 corners, 4 boundary edges."""
    g = CreaseGraph()
    assert len(g.vertices) == 4
    assert len(g.edges) == 4
    # Every starting edge must carry the boundary ('B') assignment.
    # Iterate .values() directly: the edge ids and endpoints are unused here.
    for _v1, _v2, assignment in g.edges.values():
        assert assignment == 'B'
    # No creases have been added, so there are no interior vertices yet.
    assert g.interior_vertices() == []
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def test_add_vertex_dedup():
    """Adding the exact same coordinates twice yields the same vertex id."""
    g = CreaseGraph()
    first = g.add_vertex(0.5, 0.5)
    second = g.add_vertex(0.5, 0.5)
    assert first == second
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def test_add_vertex_dedup_near():
    """Coordinates within VERTEX_TOL of an existing vertex are merged."""
    g = CreaseGraph()
    first = g.add_vertex(0.5, 0.5)
    nearby = g.add_vertex(0.5 + VERTEX_TOL * 0.5, 0.5)
    assert first == nearby
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def test_cyclic_order():
    """get_cyclic_edges returns a vertex's edges sorted by ascending angle."""
    g = CreaseGraph()
    center_id = g.add_vertex(0.5, 0.5)

    # Four spokes at the cardinal directions around the center vertex.
    right_id = g.add_vertex(0.8, 0.5)  # 0 degrees
    top_id = g.add_vertex(0.5, 0.8)  # 90 degrees
    left_id = g.add_vertex(0.2, 0.5)  # 180 degrees
    bottom_id = g.add_vertex(0.5, 0.2)  # 270 degrees / -90 degrees

    e_right = g.add_edge(center_id, right_id, 'M')
    e_top = g.add_edge(center_id, top_id, 'M')
    e_left = g.add_edge(center_id, left_id, 'M')
    e_bottom = g.add_edge(center_id, bottom_id, 'M')

    cyclic = g.get_cyclic_edges(center_id)
    # Sorted by angle ascending: right(0), top(90), left(180), bottom(-90 → 270)
    # arctan2 for bottom gives -pi/2 which sorts before 0 in ascending order
    # So actual ascending order: bottom(-pi/2), right(0), top(pi/2), left(pi)
    assert len(cyclic) == 4

    def edge_angle(eid):
        # Angle of the spoke from the center toward the edge's other endpoint.
        ev1, ev2, _ = g.edges[eid]
        other_id = ev2 if ev1 == center_id else ev1
        ox, oy = g.vertices[other_id]
        cx, cy = g.vertices[center_id]
        return float(np.arctan2(oy - cy, ox - cx))

    angles = [edge_angle(eid) for eid in cyclic]
    assert angles == sorted(angles), "Edges should be sorted by ascending angle"

    assert e_right in cyclic
    assert e_top in cyclic
    assert e_left in cyclic
    assert e_bottom in cyclic

    # Verify specific order: bottom < right < top < left in angle space
    pos = {eid: i for i, eid in enumerate(cyclic)}
    assert pos[e_bottom] < pos[e_right] < pos[e_top] < pos[e_left]
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def test_interior_vertices_empty():
    """The pristine square has only boundary (corner) vertices."""
    graph = CreaseGraph()
    assert graph.interior_vertices() == []
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def test_interior_vertices_with_crease_intersection():
    """A vertex strictly inside the square is classified as interior."""
    graph = CreaseGraph()
    vid = graph.add_vertex(0.5, 0.5)
    assert vid in graph.interior_vertices()
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def test_split_edge():
    """Splitting an edge at a vertex removes it and creates two halves
    that share the split vertex and keep the original assignment."""
    g = CreaseGraph()
    # Find the bottom boundary edge (0,0)-(1,0) which is edge 0: v0-v1
    original_edge_id = None
    for eid, (v1, v2, assignment) in g.edges.items():
        x1, y1 = g.vertices[v1]
        x2, y2 = g.vertices[v2]
        if {(x1, y1), (x2, y2)} == {(0.0, 0.0), (1.0, 0.0)}:
            original_edge_id = eid
            original_v1 = v1
            original_v2 = v2
            break

    assert original_edge_id is not None

    # Split at the midpoint of the bottom edge.
    mid_id = g.add_vertex(0.5, 0.0)
    eid1, eid2 = g.split_edge(original_edge_id, mid_id)

    # The original edge must be gone, replaced by the two new ids.
    assert original_edge_id not in g.edges

    assert eid1 in g.edges
    assert eid2 in g.edges

    # Both halves inherit the boundary assignment.
    _, _, a1 = g.edges[eid1]
    _, _, a2 = g.edges[eid2]
    assert a1 == 'B'
    assert a2 == 'B'

    def edge_vertex_set(eid):
        # Endpoints of an edge as a set (order-independent comparison).
        v1, v2, _ = g.edges[eid]
        return {v1, v2}

    # The split vertex belongs to both halves; each original endpoint
    # survives in one half or the other.
    assert mid_id in edge_vertex_set(eid1)
    assert mid_id in edge_vertex_set(eid2)
    assert original_v1 in edge_vertex_set(eid1) or original_v1 in edge_vertex_set(eid2)
    assert original_v2 in edge_vertex_set(eid1) or original_v2 in edge_vertex_set(eid2)
|
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from openenv_runtime.environment import OpenEnvOrigamiEnvironment
|
| 2 |
+
from openenv_runtime.models import OrigamiAction, OrigamiFold, OrigamiObservation
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def test_openenv_reset_returns_observation():
|
| 6 |
+
env = OpenEnvOrigamiEnvironment(default_mode="step", max_steps=8)
|
| 7 |
+
obs = env.reset(target_name="half_horizontal", episode_id="ep-1")
|
| 8 |
+
|
| 9 |
+
assert isinstance(obs, OrigamiObservation)
|
| 10 |
+
assert obs.done is False
|
| 11 |
+
assert obs.target_name == "half_horizontal"
|
| 12 |
+
assert "prompt" in obs.model_fields_set
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def test_openenv_step_single_fold_completes_simple_target():
|
| 16 |
+
env = OpenEnvOrigamiEnvironment(default_mode="step", max_steps=8)
|
| 17 |
+
env.reset(target_name="half_horizontal")
|
| 18 |
+
|
| 19 |
+
action = OrigamiAction(
|
| 20 |
+
mode="single",
|
| 21 |
+
fold=OrigamiFold(
|
| 22 |
+
from_point=[0.0, 0.5],
|
| 23 |
+
to_point=[1.0, 0.5],
|
| 24 |
+
assignment="V",
|
| 25 |
+
instruction="Valley fold along horizontal center line",
|
| 26 |
+
),
|
| 27 |
+
)
|
| 28 |
+
obs = env.step(action)
|
| 29 |
+
|
| 30 |
+
assert obs.reward is not None
|
| 31 |
+
assert obs.reward > 1.0
|
| 32 |
+
assert obs.done is True
|
| 33 |
+
assert obs.reward_components.get("completion", 0.0) >= 10.0
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def test_openenv_step_sequence_mode_executes_completion():
|
| 37 |
+
env = OpenEnvOrigamiEnvironment(default_mode="step", max_steps=8)
|
| 38 |
+
env.reset(target_name="half_vertical")
|
| 39 |
+
|
| 40 |
+
completion = (
|
| 41 |
+
'<folds>[{"instruction": "Mountain fold vertical center", '
|
| 42 |
+
'"from": [0.5, 0.0], "to": [0.5, 1.0], "assignment": "M"}]</folds>'
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
obs = env.step(OrigamiAction(mode="sequence", completion=completion))
|
| 46 |
+
|
| 47 |
+
assert obs.done is True
|
| 48 |
+
assert obs.reward is not None
|
| 49 |
+
assert obs.reward > 1.0
|
| 50 |
+
|
| 51 |
+
|
def test_openenv_state_contains_targets_and_step_count():
    """The adapter state exposes episode id, step counter, and available targets."""
    environment = OpenEnvOrigamiEnvironment(default_mode="step", max_steps=8)
    environment.reset(target_name="half_horizontal", episode_id="ep-state")

    current = environment.state

    assert current.episode_id == "ep-state"
    assert current.step_count == 0
    assert "half_horizontal" in current.available_targets
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
from env.paper_state import PaperState, UNIT_SQUARE_CORNERS
|
| 3 |
+
from env.graph import VERTEX_TOL
|
| 4 |
+
|
| 5 |
+
|
def test_single_crease_no_interior_vertices():
    """A single edge-to-edge crease must not introduce interior vertices."""
    sheet = PaperState()
    outcome = sheet.add_crease([0.0, 0.5], [1.0, 0.5], 'V')
    assert outcome['valid'] is True
    interior = sheet.graph.interior_vertices()
    assert interior == [], f"Expected no interior vertices, got {interior}"
| 12 |
+
|
| 13 |
+
|
def test_anchor_points_initial():
    """A fresh sheet exposes all four unit-square corners as anchor points."""
    sheet = PaperState()
    candidates = sheet.anchor_points()
    for corner in UNIT_SQUARE_CORNERS:
        # Tolerance-based match: anchors are floats, not exact corner tuples.
        matched = any(
            abs(px - corner[0]) < VERTEX_TOL and abs(py - corner[1]) < VERTEX_TOL
            for px, py in candidates
        )
        assert matched, f"Corner {corner} not found in anchor_points"
| 22 |
+
|
| 23 |
+
|
def test_anchor_points_grow():
    """Adding a crease makes its endpoints available as new anchor points."""
    sheet = PaperState()
    outcome = sheet.add_crease([0.0, 0.5], [1.0, 0.5], 'V')
    assert outcome['valid'] is True

    candidates = sheet.anchor_points()

    def has_point(px, py):
        # Tolerance-based containment check over the anchor list.
        for ax, ay in candidates:
            if abs(ax - px) < VERTEX_TOL and abs(ay - py) < VERTEX_TOL:
                return True
        return False

    assert has_point(0.0, 0.5), "(0, 0.5) should be in anchor_points after crease"
    assert has_point(1.0, 0.5), "(1, 0.5) should be in anchor_points after crease"
| 36 |
+
|
| 37 |
+
|
def test_invalid_assignment():
    """An unknown crease assignment letter is rejected with an explicit error code."""
    sheet = PaperState()
    outcome = sheet.add_crease([0.0, 0.5], [1.0, 0.5], 'X')
    assert outcome['valid'] is False
    assert 'invalid_assignment' in outcome['errors']
| 43 |
+
|
| 44 |
+
|
def test_fold_history():
    """A successful crease is recorded as exactly one fold-history entry.

    Unlike the sibling tests, the original discarded the ``add_crease``
    result; if the crease were rejected, the history-length assertion
    would fail (or pass) for the wrong reason. Guard on validity first
    so a rejected crease surfaces as its own clear failure.
    """
    paper = PaperState()
    result = paper.add_crease([0.0, 0.5], [1.0, 0.5], 'M')
    # Guard: the history length is only meaningful for an accepted fold.
    assert result['valid'] is True
    assert len(paper.fold_history) == 1
| 49 |
+
|
| 50 |
+
|
def test_unanchored_returns_false_anchored():
    """A crease whose endpoints touch no existing anchors reports anchored=False."""
    sheet = PaperState()
    # Deliberately floating segment: neither endpoint lies on an anchor point.
    outcome = sheet.add_crease([0.3, 0.3], [0.7, 0.7], 'M')
    assert outcome['anchored'] is False
| 55 |
+
|
| 56 |
+
|
def test_crease_edges_returned():
    """crease_edges() lists at least one well-formed M/V edge after a crease.

    Adds a validity guard on the ``add_crease`` result (consistent with
    the other tests in this module); previously a rejected crease would
    surface as a confusing empty-edges assertion failure instead of a
    clear validity failure.
    """
    paper = PaperState()
    result = paper.add_crease([0.0, 0.5], [1.0, 0.5], 'M')
    # Guard: edge inspection is only meaningful for an accepted fold.
    assert result['valid'] is True
    edges = paper.crease_edges()
    assert len(edges) >= 1
    for e in edges:
        assert e['assignment'] in ('M', 'V')
        assert 'v1' in e
        assert 'v2' in e
| 66 |
+
|
| 67 |
+
|
def test_two_intersecting_creases():
    """Two crossing creases create an interior vertex at their intersection."""
    sheet = PaperState()
    first = sheet.add_crease([0.0, 0.5], [1.0, 0.5], 'M')
    second = sheet.add_crease([0.5, 0.0], [0.5, 1.0], 'V')
    assert first['valid'] is True
    assert second['valid'] is True

    interior = sheet.graph.interior_vertices()
    assert len(interior) >= 1
    # The creases cross at (0.5, 0.5); verify a vertex exists there.
    coords = [sheet.graph.vertices[vid] for vid in interior]
    assert any(
        abs(x - 0.5) < VERTEX_TOL and abs(y - 0.5) < VERTEX_TOL
        for x, y in coords
    )