This view is limited to 50 files because it contains too many changes. See the raw diff here.
Files changed (50) hide show
  1. .dockerignore +18 -0
  2. .gitignore +2 -3
  3. Dockerfile +27 -0
  4. README.md +21 -67
  5. build/asset-manifest.json +13 -0
  6. build/favicon.ico +0 -0
  7. build/index.html +1 -0
  8. build/logo192.png +0 -0
  9. build/logo512.png +0 -0
  10. build/manifest.json +25 -0
  11. build/robots.txt +3 -0
  12. build/static/css/main.2ef3bb14.css +2 -0
  13. build/static/css/main.2ef3bb14.css.map +1 -0
  14. build/static/js/main.b94094b6.js +0 -0
  15. build/static/js/main.b94094b6.js.LICENSE.txt +49 -0
  16. build/static/js/main.b94094b6.js.map +0 -0
  17. docs/optigami_handoff.md +767 -0
  18. engine/fold_engine.py +0 -207
  19. engine/materials.py +0 -79
  20. engine/metrics.py +0 -104
  21. engine/paper.py +0 -488
  22. engine/physics.py +0 -257
  23. engine/validation.py +0 -256
  24. {engine → env}/__init__.py +0 -0
  25. env/environment.py +260 -0
  26. env/graph.py +117 -0
  27. env/paper_state.py +164 -0
  28. env/prompts.py +284 -0
  29. env/rewards.py +135 -0
  30. {planner → env/targets}/__init__.py +0 -0
  31. env/targets/accordion_3h.fold +67 -0
  32. env/targets/accordion_4h.fold +79 -0
  33. env/targets/diagonal_anti.fold +35 -0
  34. env/targets/diagonal_main.fold +35 -0
  35. env/targets/half_horizontal.fold +43 -0
  36. env/targets/half_vertical.fold +43 -0
  37. env/targets/thirds_h.fold +55 -0
  38. env/targets/thirds_v.fold +55 -0
  39. env/targets/validator.py +119 -0
  40. env/targets/validator_check.py +21 -0
  41. env/verifier.py +261 -0
  42. openenv.yaml +6 -0
  43. openenv_runtime/__init__.py +11 -0
  44. openenv_runtime/environment.py +53 -0
  45. openenv_runtime/models.py +53 -0
  46. openenv_server/__init__.py +1 -0
  47. openenv_server/app.py +269 -0
  48. package-lock.json +0 -0
  49. package.json +1 -0
  50. planner/decomposer.py +0 -284
.dockerignore ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .git
2
+ .DS_Store
3
+ __pycache__
4
+ *.pyc
5
+ *.pyo
6
+ .pytest_cache
7
+ .claude
8
+ node_modules
9
+ build
10
+ research
11
+ docs
12
+ plans
13
+ RESEARCH_NOTES.md
14
+ trainer
15
+ train.py
16
+ sim
17
+ viz
18
+ planner
.gitignore CHANGED
@@ -8,9 +8,6 @@
8
  # testing
9
  /coverage
10
 
11
- # production
12
- /build
13
-
14
  # misc
15
  .DS_Store
16
  .env.local
@@ -28,3 +25,5 @@ __pycache__/
28
 
29
  # Reference repos (not pushed to HF)
30
  .reference/
 
 
 
8
  # testing
9
  /coverage
10
 
 
 
 
11
  # misc
12
  .DS_Store
13
  .env.local
 
25
 
26
  # Reference repos (not pushed to HF)
27
  .reference/
28
+ *.pyc
29
+ __pycache__/
Dockerfile ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM node:20-alpine AS web-builder
2
+
3
+ WORKDIR /web
4
+ COPY package*.json ./
5
+ RUN npm ci --no-audit --no-fund
6
+ COPY public ./public
7
+ COPY src ./src
8
+ RUN npm run build
9
+
10
+ FROM ghcr.io/meta-pytorch/openenv-base:latest
11
+
12
+ WORKDIR /app
13
+
14
+ # Install Python deps first for better layer caching
15
+ COPY requirements.txt ./
16
+ RUN pip install --no-cache-dir -r requirements.txt \
17
+ && pip install --no-cache-dir "openenv-core[core]>=0.2.1"
18
+
19
+ # Copy application source
20
+ COPY . /app
21
+
22
+ # Overlay the compiled React frontend
23
+ COPY --from=web-builder /web/build /app/build
24
+
25
+ EXPOSE 8000
26
+
27
+ CMD ["uvicorn", "openenv_server.app:app", "--host", "0.0.0.0", "--port", "8000"]
README.md CHANGED
@@ -3,81 +3,35 @@ title: Optigami
3
  emoji: 🐠
4
  colorFrom: indigo
5
  colorTo: red
6
- sdk: static
7
  pinned: false
8
- app_build_command: npm run build
9
- app_file: build/index.html
10
  license: mit
11
- short_description: ':)'
12
  ---
13
 
14
- # Getting Started with Create React App
15
 
16
- This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).
 
 
 
17
 
18
- ## Available Scripts
 
 
19
 
20
- In the project directory, you can run:
21
 
22
- ### `npm start`
 
 
23
 
24
- Runs the app in the development mode.\
25
- Open [http://localhost:3000](http://localhost:3000) to view it in your browser.
26
 
27
- The page will reload when you make changes.\
28
- You may also see any lint errors in the console.
 
 
29
 
30
- ### `npm test`
31
-
32
- Launches the test runner in the interactive watch mode.\
33
- See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
34
-
35
- ### `npm run build`
36
-
37
- Builds the app for production to the `build` folder.\
38
- It correctly bundles React in production mode and optimizes the build for the best performance.
39
-
40
- The build is minified and the filenames include the hashes.\
41
- Your app is ready to be deployed!
42
-
43
- See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.
44
-
45
- ### `npm run eject`
46
-
47
- **Note: this is a one-way operation. Once you `eject`, you can't go back!**
48
-
49
- If you aren't satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project.
50
-
51
- Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you're on your own.
52
-
53
- You don't have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn't feel obligated to use this feature. However we understand that this tool wouldn't be useful if you couldn't customize it when you are ready for it.
54
-
55
- ## Learn More
56
-
57
- You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).
58
-
59
- To learn React, check out the [React documentation](https://reactjs.org/).
60
-
61
- ### Code Splitting
62
-
63
- This section has moved here: [https://facebook.github.io/create-react-app/docs/code-splitting](https://facebook.github.io/create-react-app/docs/code-splitting)
64
-
65
- ### Analyzing the Bundle Size
66
-
67
- This section has moved here: [https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size](https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size)
68
-
69
- ### Making a Progressive Web App
70
-
71
- This section has moved here: [https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app](https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app)
72
-
73
- ### Advanced Configuration
74
-
75
- This section has moved here: [https://facebook.github.io/create-react-app/docs/advanced-configuration](https://facebook.github.io/create-react-app/docs/advanced-configuration)
76
-
77
- ### Deployment
78
-
79
- This section has moved here: [https://facebook.github.io/create-react-app/docs/deployment](https://facebook.github.io/create-react-app/docs/deployment)
80
-
81
- ### `npm run build` fails to minify
82
-
83
- This section has moved here: [https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify](https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify)
 
3
  emoji: 🐠
4
  colorFrom: indigo
5
  colorTo: red
6
+ sdk: docker
7
  pinned: false
8
+ app_port: 8000
 
9
  license: mit
10
+ short_description: OpenEnv origami environment and demo
11
  ---
12
 
13
+ # Optigami
14
 
15
+ OpenEnv-compatible origami RL environment with:
16
+ - environment + reward checks in `env/`
17
+ - OpenEnv server adapter in `openenv_runtime/` and `openenv_server/`
18
+ - Dockerized deployment for Hugging Face Spaces
19
 
20
+ Entry point: `openenv_server.app:app`
21
+ Manifest: `openenv.yaml`
22
+ Container: `Dockerfile`
23
 
24
+ ## Local Run
25
 
26
+ ```bash
27
+ uvicorn openenv_server.app:app --host 0.0.0.0 --port 8000
28
+ ```
29
 
30
+ ## Frontend (optional local React demo)
 
31
 
32
+ ```bash
33
+ npm install
34
+ npm start
35
+ ```
36
 
37
+ This serves the dashboard against the FastAPI API.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
build/asset-manifest.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "files": {
3
+ "main.css": "/static/css/main.2ef3bb14.css",
4
+ "main.js": "/static/js/main.b94094b6.js",
5
+ "index.html": "/index.html",
6
+ "main.2ef3bb14.css.map": "/static/css/main.2ef3bb14.css.map",
7
+ "main.b94094b6.js.map": "/static/js/main.b94094b6.js.map"
8
+ },
9
+ "entrypoints": [
10
+ "static/css/main.2ef3bb14.css",
11
+ "static/js/main.b94094b6.js"
12
+ ]
13
+ }
build/favicon.ico ADDED
build/index.html ADDED
@@ -0,0 +1 @@
 
 
1
+ <!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="/favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="Web site created using create-react-app"/><link rel="apple-touch-icon" href="/logo192.png"/><link rel="manifest" href="/manifest.json"/><title>React App</title><script defer="defer" src="/static/js/main.b94094b6.js"></script><link href="/static/css/main.2ef3bb14.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
build/logo192.png ADDED
build/logo512.png ADDED
build/manifest.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "short_name": "React App",
3
+ "name": "Create React App Sample",
4
+ "icons": [
5
+ {
6
+ "src": "favicon.ico",
7
+ "sizes": "64x64 32x32 24x24 16x16",
8
+ "type": "image/x-icon"
9
+ },
10
+ {
11
+ "src": "logo192.png",
12
+ "type": "image/png",
13
+ "sizes": "192x192"
14
+ },
15
+ {
16
+ "src": "logo512.png",
17
+ "type": "image/png",
18
+ "sizes": "512x512"
19
+ }
20
+ ],
21
+ "start_url": ".",
22
+ "display": "standalone",
23
+ "theme_color": "#000000",
24
+ "background_color": "#ffffff"
25
+ }
build/robots.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # https://www.robotstxt.org/robotstxt.html
2
+ User-agent: *
3
+ Disallow:
build/static/css/main.2ef3bb14.css ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ @import url(https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@300;400;500;700&family=IBM+Plex+Mono:wght@300;400;500&display=swap);*,:after,:before{box-sizing:border-box;margin:0;padding:0}body{-webkit-font-smoothing:antialiased;background:#0d0d14;color:#f8fafc;font-family:IBM Plex Mono,monospace;font-size:13px;line-height:1.5;overflow-x:hidden}::-webkit-scrollbar{height:4px;width:4px}::-webkit-scrollbar-track{background:#0d0d14}::-webkit-scrollbar-thumb{background:#2a2a3a}::-webkit-scrollbar-thumb:hover{background:#3a3a5a}:root{--bg:#0d0d14;--surface:#13131d;--surface-2:#1a1a2e;--paper-white:#fafaf5;--paper-edge:#2a2a3a;--mountain:#f59e0b;--valley:#38bdf8;--target-ghost:#7c3aed33;--target-ghost-stroke:#7c3aed73;--validity:#22d3ee;--progress:#22c55e;--economy:#a78bfa;--text-primary:#f8fafc;--text-dim:#64748b;--border:#2a2a3a;--border-bright:#3a3a5a;--font-display:"JetBrains Mono",monospace;--font-mono:"IBM Plex Mono",monospace}.app{background:#0d0d14;background:var(--bg);display:flex;flex-direction:column;height:100vh;overflow:hidden}.app-header{align-items:center;background:#13131d;background:var(--surface);border-bottom:1px solid #2a2a3a;border-bottom:1px solid var(--border);display:flex;flex-shrink:0;gap:24px;height:48px;padding:0 20px;z-index:10}.app-title{color:#f8fafc;color:var(--text-primary);font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:14px;font-weight:700;letter-spacing:.12em;white-space:nowrap}.app-title .title-accent{color:#f59e0b;color:var(--mountain)}.header-sep{background:#2a2a3a;background:var(--border);flex-shrink:0;height:24px;width:1px}.header-right{align-items:center;display:flex;gap:16px;margin-left:auto}.replay-badge{background:#38bdf81a;border:1px solid #38bdf84d;border-radius:3px;color:#38bdf8;letter-spacing:.1em;padding:3px 8px}.back-to-grid-btn,.replay-badge{font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:10px}.back-to-grid-btn{background:#0000;border:1px solid 
#1e2a3a;border-radius:3px;color:#64748b;cursor:pointer;letter-spacing:.08em;padding:3px 10px}.back-to-grid-btn:hover{border-color:#64748b;color:#e2e8f0}.api-status{align-items:center;display:flex;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:11px;gap:6px;letter-spacing:.08em}.api-status-dot{background:#64748b;background:var(--text-dim);border-radius:50%;height:6px;width:6px}.api-status-dot.ok{background:#22c55e;background:var(--progress);box-shadow:0 0 6px #22c55e;box-shadow:0 0 6px var(--progress)}.api-status-dot.err{background:#ef4444;box-shadow:0 0 6px #ef4444}.app-body{display:grid;flex:1 1;grid-template-columns:1fr 280px;overflow:hidden}.app-left{border-right:1px solid #2a2a3a;border-right:1px solid var(--border)}.app-left,.app-right{display:flex;flex-direction:column;overflow:hidden}.app-right{background:#13131d;background:var(--surface)}.canvas-row{border-bottom:1px solid #2a2a3a;border-bottom:1px solid var(--border);display:flex;flex-shrink:0;gap:0;overflow-x:auto;padding:16px}.canvas-wrap{display:flex;flex:1 1;flex-direction:column;gap:8px;min-width:280px}.canvas-wrap+.canvas-wrap{margin-left:16px}.canvas-label{color:#64748b;color:var(--text-dim);font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:10px;font-weight:500;letter-spacing:.14em;text-transform:uppercase}.canvas-svg{background:#fafaf5;background:var(--paper-white);display:block}.canvas-3d{background:linear-gradient(180deg,#1a1a2e,#0f101a);border:1px solid #2a2a3a;border:1px solid var(--border);display:block}.canvas-label-row{align-items:center;display:flex;gap:10px;justify-content:space-between}.fold-mode-toggle{background:#13131d;background:var(--surface);border:1px solid #2a2a3a;border:1px solid var(--border);display:inline-flex}.fold-mode-btn{background:#0000;border:none;color:#64748b;color:var(--text-dim);cursor:pointer;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:9px;letter-spacing:.08em;padding:3px 
7px}.fold-mode-btn+.fold-mode-btn{border-left:1px solid #2a2a3a;border-left:1px solid var(--border)}.fold-mode-btn.active{background:#1f2538;color:#f8fafc;color:var(--text-primary)}.step-feed-section{display:flex;flex:1 1;flex-direction:column;overflow:hidden}.section-header{border-bottom:1px solid #2a2a3a;border-bottom:1px solid var(--border);color:#64748b;color:var(--text-dim);flex-shrink:0;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:10px;font-weight:500;letter-spacing:.14em;padding:8px 16px;text-transform:uppercase}.step-feed{flex:1 1;overflow-y:auto;padding:4px 0}.step-entry{border-bottom:1px solid #2a2a3a;border-bottom:1px solid var(--border);cursor:default;display:flex;flex-direction:column;gap:2px;padding:8px 16px;transition:background .1s}.step-entry:hover{background:#13131d;background:var(--surface)}.step-entry.active{background:#1a1a2e;background:var(--surface-2);border-left:2px solid #38bdf8;border-left:2px solid var(--valley);padding-left:14px}.step-entry-top{align-items:center;display:flex;gap:8px}.step-num{color:#64748b;color:var(--text-dim);flex-shrink:0;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:10px;font-weight:700;width:24px}.step-instruction{color:#f8fafc;color:var(--text-primary);flex:1 1;font-size:12px}.assign-badge{flex-shrink:0;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:10px;font-weight:700;line-height:1.4;padding:1px 5px}.assign-badge.M{background:#f59e0b;background:var(--mountain);color:#0d0d14}.assign-badge.V{background:#38bdf8;background:var(--valley);color:#0d0d14}.assign-badge.B{background:#3a3a5a;background:var(--border-bright)}.assign-badge.B,.step-reward-delta{color:#64748b;color:var(--text-dim)}.step-reward-delta{font-size:11px;padding-left:32px}.step-reward-delta .delta-positive{color:#22c55e;color:var(--progress)}.step-reward-delta .delta-negative{color:#ef4444}.reward-panel{border-bottom:1px solid #2a2a3a;border-bottom:1px 
solid var(--border);flex-shrink:0;padding:12px 16px}.reward-row{align-items:center;display:flex;gap:8px;margin-bottom:6px}.reward-row:last-child{margin-bottom:0}.reward-label{color:#64748b;color:var(--text-dim);flex-shrink:0;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:10px;font-weight:500;letter-spacing:.06em;text-transform:uppercase;width:72px}.reward-track{background:#0d0d14;background:var(--bg);border:1px solid #2a2a3a;border:1px solid var(--border);flex:1 1;height:8px;overflow:hidden}.reward-bar{height:100%;transition:width .4s ease}.reward-value{color:#f8fafc;color:var(--text-primary);flex-shrink:0;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:11px;font-weight:500;text-align:right;width:36px}.reward-value.dim{color:#64748b;color:var(--text-dim)}.reward-divider{background:#2a2a3a;background:var(--border);height:1px;margin:6px 0}.info-badges{display:flex;flex-direction:column;gap:8px;padding:12px 16px}.info-row{align-items:center;display:flex;gap:8px;justify-content:space-between}.info-key{color:#64748b;color:var(--text-dim);font-size:10px;font-weight:500;letter-spacing:.06em;text-transform:uppercase}.info-key,.info-val{font-family:JetBrains Mono,monospace;font-family:var(--font-display)}.info-val{color:#f8fafc;color:var(--text-primary);font-size:11px;font-weight:700}.info-val.bool-true{color:#22c55e;color:var(--progress)}.info-val.bool-false{color:#ef4444}.info-val.dim{color:#64748b;color:var(--text-dim)}.target-selector{align-items:center;display:flex;gap:8px}.target-selector-label{color:#64748b;color:var(--text-dim);font-size:10px;font-weight:500;letter-spacing:.1em;text-transform:uppercase;white-space:nowrap}.target-select,.target-selector-label{font-family:JetBrains Mono,monospace;font-family:var(--font-display)}.target-select{background:#1a1a2e;background:var(--surface-2);border:1px solid #3a3a5a;border:1px solid 
var(--border-bright);color:#f8fafc;color:var(--text-primary);cursor:pointer;font-size:11px;min-width:180px;outline:none;padding:4px 8px}.target-select:focus{border-color:#38bdf8;border-color:var(--valley)}optgroup{background:#13131d;background:var(--surface);color:#64748b;color:var(--text-dim);font-size:10px}optgroup,option{font-family:JetBrains Mono,monospace;font-family:var(--font-display)}option{background:#1a1a2e;background:var(--surface-2);color:#f8fafc;color:var(--text-primary)}.player-controls{align-items:center;display:flex;flex-shrink:0;gap:6px}.ctrl-btn{background:#1a1a2e;background:var(--surface-2);border:1px solid #3a3a5a;border:1px solid var(--border-bright);color:#f8fafc;color:var(--text-primary);cursor:pointer;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:11px;font-weight:500;letter-spacing:.04em;line-height:1.4;padding:4px 10px;transition:background .1s,border-color .1s;white-space:nowrap}.ctrl-btn:hover:not(:disabled){background:#13131d;background:var(--surface);border-color:#64748b;border-color:var(--text-dim)}.ctrl-btn:disabled{cursor:not-allowed;opacity:.35}.ctrl-btn.play{border-color:#38bdf8;border-color:var(--valley);color:#38bdf8;color:var(--valley)}.ctrl-btn.play:hover:not(:disabled){background:#38bdf81a}.ctrl-step-display{border:1px solid #2a2a3a;border:1px solid var(--border);color:#64748b;color:var(--text-dim);font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:11px;min-width:72px;padding:4px 8px;text-align:center;white-space:nowrap}.app-overlay,.ctrl-step-display{background:#0d0d14;background:var(--bg)}.app-overlay{inset:0;justify-content:center;position:fixed;z-index:100}.app-overlay,.overlay-message{align-items:center;display:flex}.overlay-message{color:#64748b;color:var(--text-dim);font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:13px;gap:12px;letter-spacing:.1em}.pulse-dot{animation:pulse 1.2s ease-in-out 
infinite;background:#38bdf8;background:var(--valley);border-radius:50%;height:8px;width:8px}@keyframes pulse{0%,to{opacity:.2;transform:scale(.8)}50%{opacity:1;transform:scale(1)}}.episode-loading{align-items:center;color:#64748b;color:var(--text-dim);display:flex;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:11px;gap:8px;justify-content:center;letter-spacing:.08em;padding:12px 16px}
2
+ /*# sourceMappingURL=main.2ef3bb14.css.map*/
build/static/css/main.2ef3bb14.css.map ADDED
@@ -0,0 +1 @@
 
 
1
+ {"version":3,"file":"static/css/main.2ef3bb14.css","mappings":"6IAEA,iBACE,qBAAsB,CACtB,QAAS,CACT,SACF,CAEA,KAME,kCAAmC,CALnC,kBAAmB,CACnB,aAAc,CACd,mCAAuC,CACvC,cAAe,CACf,eAAgB,CAEhB,iBACF,CAEA,oBAEE,UAAW,CADX,SAEF,CAEA,0BACE,kBACF,CAEA,0BACE,kBACF,CAEA,gCACE,kBACF,CCjCA,MACE,YAAa,CACb,iBAAkB,CAClB,mBAAoB,CACpB,qBAAsB,CACtB,oBAAqB,CACrB,kBAAmB,CACnB,gBAAiB,CACjB,wBAAwC,CACxC,+BAA+C,CAC/C,kBAAmB,CACnB,kBAAmB,CACnB,iBAAkB,CAClB,sBAAuB,CACvB,kBAAmB,CACnB,gBAAiB,CACjB,uBAAwB,CACxB,yCAA2C,CAC3C,qCACF,CAEA,KAIE,kBAAqB,CAArB,oBAAqB,CAHrB,YAAa,CACb,qBAAsB,CACtB,YAAa,CAEb,eACF,CAGA,YAEE,kBAAmB,CAKnB,kBAA0B,CAA1B,yBAA0B,CAD1B,+BAAsC,CAAtC,qCAAsC,CALtC,YAAa,CAOb,aAAc,CALd,QAAS,CAET,WAAY,CADZ,cAAe,CAKf,UACF,CAEA,WAKE,aAA0B,CAA1B,yBAA0B,CAJ1B,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAChB,oBAAsB,CAEtB,kBACF,CAEA,yBACE,aAAsB,CAAtB,qBACF,CAEA,YAGE,kBAAyB,CAAzB,wBAAyB,CACzB,aAAc,CAFd,WAAY,CADZ,SAIF,CAEA,cAEE,kBAAmB,CADnB,YAAa,CAEb,QAAS,CACT,gBACF,CAEA,cAKE,oBAAmC,CACnC,0BAAyC,CAEzC,iBAAkB,CAJlB,aAAc,CADd,mBAAqB,CAIrB,eAEF,CAEA,gCATE,oCAAgC,CAAhC,+BAAgC,CADhC,cAoBF,CAVA,kBAKE,gBAAuB,CACvB,wBAAyB,CAEzB,iBAAkB,CAJlB,aAAc,CAKd,cAAe,CANf,oBAAsB,CAItB,gBAGF,CACA,wBAA0C,oBAAqB,CAArC,aAAuC,CAEjE,YAKE,kBAAmB,CADnB,YAAa,CAFb,oCAAgC,CAAhC,+BAAgC,CADhC,cAAe,CAKf,OAAQ,CAHR,oBAIF,CAEA,gBAIE,kBAA2B,CAA3B,0BAA2B,CAD3B,iBAAkB,CADlB,UAAW,CADX,SAIF,CAEA,mBACE,kBAA2B,CAA3B,0BAA2B,CAC3B,0BAAmC,CAAnC,kCACF,CAEA,oBACE,kBAAmB,CACnB,0BACF,CAGA,UACE,YAAa,CAEb,QAAO,CADP,+BAAgC,CAEhC,eACF,CAEA,UAIE,8BAAqC,CAArC,oCACF,CAEA,qBANE,YAAa,CACb,qBAAsB,CACtB,eASF,CALA,WAIE,kBAA0B,CAA1B,yBACF,CAGA,YAKE,+BAAsC,CAAtC,qCAAsC,CAJtC,YAAa,CAGb,aAAc,CAFd,KAAM,CAIN,eAAgB,CAHhB,YAIF,CAEA,aACE,YAAa,CAGb,QAAO,CAFP,qBAAsB,CACtB,OAAQ,CAER,eACF,CAEA,0BACE,gBACF,CAEA,cAKE,aAAsB,CAAtB,qBAAsB,CAJtB,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAChB,oBAAsB,CAEtB,wBACF,CAEA,YAEE,kBAA8B,CAA9B,6BAA8B,CAD9B,aAEF,CAEA,WAEE,kDAA6D,CAC7D,wBAA+B,CAA/B,8BAA+B,CAF/B,aAGF,CAEA,kBAEE,kBAAmB,CADnB,YAAa,CAGb,QAAS,CADT,6BAEF,CAEA,kBAGE,kBAA0B,CAA1B,
yBAA0B,CAD1B,wBAA+B,CAA/B,8BAA+B,CAD/B,mBAGF,CAEA,eAEE,gBAAuB,CADvB,WAAY,CAEZ,aAAsB,CAAtB,qBAAsB,CAKtB,cAAe,CAJf,oCAAgC,CAAhC,+BAAgC,CAChC,aAAc,CACd,oBAAsB,CACtB,eAEF,CAEA,8BACE,6BAAoC,CAApC,mCACF,CAEA,sBAEE,kBAAmB,CADnB,aAA0B,CAA1B,yBAEF,CAGA,mBAEE,YAAa,CADb,QAAO,CAEP,qBAAsB,CACtB,eACF,CAEA,gBAQE,+BAAsC,CAAtC,qCAAsC,CAHtC,aAAsB,CAAtB,qBAAsB,CAItB,aAAc,CARd,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAChB,oBAAsB,CAGtB,gBAAiB,CADjB,wBAIF,CAEA,WAEE,QAAO,CADP,eAAgB,CAEhB,aACF,CAEA,YAKE,+BAAsC,CAAtC,qCAAsC,CACtC,cAAe,CALf,YAAa,CACb,qBAAsB,CACtB,OAAQ,CACR,gBAAiB,CAGjB,yBACF,CAEA,kBACE,kBAA0B,CAA1B,yBACF,CAEA,mBACE,kBAA4B,CAA5B,2BAA4B,CAC5B,6BAAoC,CAApC,mCAAoC,CACpC,iBACF,CAEA,gBAEE,kBAAmB,CADnB,YAAa,CAEb,OACF,CAEA,UAIE,aAAsB,CAAtB,qBAAsB,CAEtB,aAAc,CALd,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAEhB,UAEF,CAEA,kBAEE,aAA0B,CAA1B,yBAA0B,CAC1B,QAAO,CAFP,cAGF,CAEA,cAME,aAAc,CALd,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAEhB,eAAgB,CADhB,eAGF,CAEA,gBACE,kBAA2B,CAA3B,0BAA2B,CAC3B,aACF,CAEA,gBACE,kBAAyB,CAAzB,wBAAyB,CACzB,aACF,CAEA,gBACE,kBAAgC,CAAhC,+BAEF,CAEA,mCAHE,aAAsB,CAAtB,qBAOF,CAJA,mBACE,cAAe,CAEf,iBACF,CAEA,mCACE,aAAsB,CAAtB,qBACF,CAEA,mCACE,aACF,CAGA,cAEE,+BAAsC,CAAtC,qCAAsC,CACtC,aAAc,CAFd,iBAGF,CAEA,YAEE,kBAAmB,CADnB,YAAa,CAEb,OAAQ,CACR,iBACF,CAEA,uBACE,eACF,CAEA,cAKE,aAAsB,CAAtB,qBAAsB,CAEtB,aAAc,CANd,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAChB,oBAAsB,CAItB,wBAAyB,CAFzB,UAGF,CAEA,cAGE,kBAAqB,CAArB,oBAAqB,CACrB,wBAA+B,CAA/B,8BAA+B,CAH/B,QAAO,CACP,UAAW,CAGX,eACF,CAEA,YACE,WAAY,CACZ,yBACF,CAEA,cAIE,aAA0B,CAA1B,yBAA0B,CAG1B,aAAc,CANd,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAGhB,gBAAiB,CADjB,UAGF,CAEA,kBACE,aAAsB,CAAtB,qBACF,CAEA,gBAEE,kBAAyB,CAAzB,wBAAyB,CADzB,UAAW,CAEX,YACF,CAGA,aAEE,YAAa,CACb,qBAAsB,CACtB,OAAQ,CAHR,iBAIF,CAEA,UAEE,kBAAmB,CADnB,YAAa,CAGb,OAAQ,CADR,6BAEF,CAEA,UAKE,aAAsB,CAAtB,qBAAsB,CAHtB,cAAe,CACf,eAAgB,CAChB,oBAAsB,CAEtB,wBACF,CAEA,oBARE,oCAAgC,CAAhC,+BAaF,CALA,UAIE,aAA0B,CAA1B,yBAA0B,CAF1B,cAAe,CACf,eAEF,CAEA,oBACE,aAAsB,CAAtB,qBACF,CAEA,qBAC
E,aACF,CAEA,cACE,aAAsB,CAAtB,qBACF,CAGA,iBAEE,kBAAmB,CADnB,YAAa,CAEb,OACF,CAEA,uBAKE,aAAsB,CAAtB,qBAAsB,CAHtB,cAAe,CACf,eAAgB,CAChB,mBAAsB,CAEtB,wBAAyB,CACzB,kBACF,CAEA,sCATE,oCAAgC,CAAhC,+BAmBF,CAVA,eACE,kBAA4B,CAA5B,2BAA4B,CAC5B,wBAAsC,CAAtC,qCAAsC,CACtC,aAA0B,CAA1B,yBAA0B,CAK1B,cAAe,CAHf,cAAe,CAIf,eAAgB,CAFhB,YAAa,CADb,eAIF,CAEA,qBACE,oBAA2B,CAA3B,0BACF,CAEA,SACE,kBAA0B,CAA1B,yBAA0B,CAC1B,aAAsB,CAAtB,qBAAsB,CAEtB,cACF,CAEA,gBAJE,oCAAgC,CAAhC,+BAQF,CAJA,OACE,kBAA4B,CAA5B,2BAA4B,CAC5B,aAA0B,CAA1B,yBAEF,CAGA,iBAEE,kBAAmB,CADnB,YAAa,CAGb,aAAc,CADd,OAEF,CAEA,UACE,kBAA4B,CAA5B,2BAA4B,CAC5B,wBAAsC,CAAtC,qCAAsC,CACtC,aAA0B,CAA1B,yBAA0B,CAK1B,cAAe,CAJf,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAKhB,oBAAsB,CADtB,eAAgB,CAHhB,gBAAiB,CAKjB,0CAA8C,CAH9C,kBAIF,CAEA,+BACE,kBAA0B,CAA1B,yBAA0B,CAC1B,oBAA6B,CAA7B,4BACF,CAEA,mBAEE,kBAAmB,CADnB,WAEF,CAEA,eACE,oBAA2B,CAA3B,0BAA2B,CAC3B,aAAoB,CAApB,mBACF,CAEA,oCACE,oBACF,CAEA,mBAKE,wBAA+B,CAA/B,8BAA+B,CAF/B,aAAsB,CAAtB,qBAAsB,CAFtB,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CAMf,cAAe,CAJf,eAAgB,CAKhB,iBAAkB,CAFlB,kBAGF,CAGA,gCAPE,kBAAqB,CAArB,oBAeF,CARA,aAEE,OAAQ,CAGR,sBAAuB,CAJvB,cAAe,CAMf,WACF,CAEA,8BANE,kBAAmB,CADnB,YAeF,CARA,iBAIE,aAAsB,CAAtB,qBAAsB,CAHtB,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CAKf,QAAS,CAJT,mBAKF,CAEA,WAKE,yCAA0C,CAD1C,kBAAyB,CAAzB,wBAAyB,CADzB,iBAAkB,CADlB,UAAW,CADX,SAKF,CAEA,iBACE,MAAW,UAAY,CAAE,mBAAuB,CAChD,IAAM,SAAU,CAAE,kBAAqB,CACzC,CAGA,iBAEE,kBAAmB,CAMnB,aAAsB,CAAtB,qBAAsB,CAPtB,YAAa,CAKb,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CAHf,OAAQ,CADR,sBAAuB,CAMvB,oBAAsB,CAJtB,iBAKF","sources":["index.css","App.css"],"sourcesContent":["@import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@300;400;500;700&family=IBM+Plex+Mono:wght@300;400;500&display=swap');\n\n*, *::before, *::after {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n}\n\nbody {\n background: #0d0d14;\n color: #f8fafc;\n font-family: 'IBM Plex Mono', monospace;\n font-size: 13px;\n line-height: 1.5;\n -webkit-font-smoothing: antialiased;\n overflow-x: 
hidden;\n}\n\n::-webkit-scrollbar {\n width: 4px;\n height: 4px;\n}\n\n::-webkit-scrollbar-track {\n background: #0d0d14;\n}\n\n::-webkit-scrollbar-thumb {\n background: #2a2a3a;\n}\n\n::-webkit-scrollbar-thumb:hover {\n background: #3a3a5a;\n}\n",":root {\n --bg: #0d0d14;\n --surface: #13131d;\n --surface-2: #1a1a2e;\n --paper-white: #fafaf5;\n --paper-edge: #2a2a3a;\n --mountain: #f59e0b;\n --valley: #38bdf8;\n --target-ghost: rgba(124, 58, 237, 0.20);\n --target-ghost-stroke: rgba(124, 58, 237, 0.45);\n --validity: #22d3ee;\n --progress: #22c55e;\n --economy: #a78bfa;\n --text-primary: #f8fafc;\n --text-dim: #64748b;\n --border: #2a2a3a;\n --border-bright: #3a3a5a;\n --font-display: 'JetBrains Mono', monospace;\n --font-mono: 'IBM Plex Mono', monospace;\n}\n\n.app {\n display: flex;\n flex-direction: column;\n height: 100vh;\n background: var(--bg);\n overflow: hidden;\n}\n\n/* ─── HEADER ─── */\n.app-header {\n display: flex;\n align-items: center;\n gap: 24px;\n padding: 0 20px;\n height: 48px;\n border-bottom: 1px solid var(--border);\n background: var(--surface);\n flex-shrink: 0;\n z-index: 10;\n}\n\n.app-title {\n font-family: var(--font-display);\n font-size: 14px;\n font-weight: 700;\n letter-spacing: 0.12em;\n color: var(--text-primary);\n white-space: nowrap;\n}\n\n.app-title .title-accent {\n color: var(--mountain);\n}\n\n.header-sep {\n width: 1px;\n height: 24px;\n background: var(--border);\n flex-shrink: 0;\n}\n\n.header-right {\n display: flex;\n align-items: center;\n gap: 16px;\n margin-left: auto;\n}\n\n.replay-badge {\n font-size: 10px;\n font-family: var(--font-display);\n letter-spacing: 0.1em;\n color: #38bdf8;\n background: rgba(56, 189, 248, 0.1);\n border: 1px solid rgba(56, 189, 248, 0.3);\n padding: 3px 8px;\n border-radius: 3px;\n}\n\n.back-to-grid-btn {\n font-size: 10px;\n font-family: var(--font-display);\n letter-spacing: 0.08em;\n color: #64748b;\n background: transparent;\n border: 1px solid #1e2a3a;\n padding: 3px 10px;\n 
border-radius: 3px;\n cursor: pointer;\n}\n.back-to-grid-btn:hover { color: #e2e8f0; border-color: #64748b; }\n\n.api-status {\n font-size: 11px;\n font-family: var(--font-display);\n letter-spacing: 0.08em;\n display: flex;\n align-items: center;\n gap: 6px;\n}\n\n.api-status-dot {\n width: 6px;\n height: 6px;\n border-radius: 50%;\n background: var(--text-dim);\n}\n\n.api-status-dot.ok {\n background: var(--progress);\n box-shadow: 0 0 6px var(--progress);\n}\n\n.api-status-dot.err {\n background: #ef4444;\n box-shadow: 0 0 6px #ef4444;\n}\n\n/* ─── MAIN LAYOUT ─── */\n.app-body {\n display: grid;\n grid-template-columns: 1fr 280px;\n flex: 1;\n overflow: hidden;\n}\n\n.app-left {\n display: flex;\n flex-direction: column;\n overflow: hidden;\n border-right: 1px solid var(--border);\n}\n\n.app-right {\n display: flex;\n flex-direction: column;\n overflow: hidden;\n background: var(--surface);\n}\n\n/* ─── CANVAS ROW ─── */\n.canvas-row {\n display: flex;\n gap: 0;\n padding: 16px;\n flex-shrink: 0;\n border-bottom: 1px solid var(--border);\n overflow-x: auto;\n}\n\n.canvas-wrap {\n display: flex;\n flex-direction: column;\n gap: 8px;\n flex: 1;\n min-width: 280px;\n}\n\n.canvas-wrap + .canvas-wrap {\n margin-left: 16px;\n}\n\n.canvas-label {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 500;\n letter-spacing: 0.14em;\n color: var(--text-dim);\n text-transform: uppercase;\n}\n\n.canvas-svg {\n display: block;\n background: var(--paper-white);\n}\n\n.canvas-3d {\n display: block;\n background: linear-gradient(180deg, #1a1a2e 0%, #0f101a 100%);\n border: 1px solid var(--border);\n}\n\n.canvas-label-row {\n display: flex;\n align-items: center;\n justify-content: space-between;\n gap: 10px;\n}\n\n.fold-mode-toggle {\n display: inline-flex;\n border: 1px solid var(--border);\n background: var(--surface);\n}\n\n.fold-mode-btn {\n border: none;\n background: transparent;\n color: var(--text-dim);\n font-family: var(--font-display);\n font-size: 
9px;\n letter-spacing: 0.08em;\n padding: 3px 7px;\n cursor: pointer;\n}\n\n.fold-mode-btn + .fold-mode-btn {\n border-left: 1px solid var(--border);\n}\n\n.fold-mode-btn.active {\n color: var(--text-primary);\n background: #1f2538;\n}\n\n/* ─── STEP FEED ─── */\n.step-feed-section {\n flex: 1;\n display: flex;\n flex-direction: column;\n overflow: hidden;\n}\n\n.section-header {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 500;\n letter-spacing: 0.14em;\n color: var(--text-dim);\n text-transform: uppercase;\n padding: 8px 16px;\n border-bottom: 1px solid var(--border);\n flex-shrink: 0;\n}\n\n.step-feed {\n overflow-y: auto;\n flex: 1;\n padding: 4px 0;\n}\n\n.step-entry {\n display: flex;\n flex-direction: column;\n gap: 2px;\n padding: 8px 16px;\n border-bottom: 1px solid var(--border);\n cursor: default;\n transition: background 0.1s;\n}\n\n.step-entry:hover {\n background: var(--surface);\n}\n\n.step-entry.active {\n background: var(--surface-2);\n border-left: 2px solid var(--valley);\n padding-left: 14px;\n}\n\n.step-entry-top {\n display: flex;\n align-items: center;\n gap: 8px;\n}\n\n.step-num {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 700;\n color: var(--text-dim);\n width: 24px;\n flex-shrink: 0;\n}\n\n.step-instruction {\n font-size: 12px;\n color: var(--text-primary);\n flex: 1;\n}\n\n.assign-badge {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 700;\n padding: 1px 5px;\n line-height: 1.4;\n flex-shrink: 0;\n}\n\n.assign-badge.M {\n background: var(--mountain);\n color: #0d0d14;\n}\n\n.assign-badge.V {\n background: var(--valley);\n color: #0d0d14;\n}\n\n.assign-badge.B {\n background: var(--border-bright);\n color: var(--text-dim);\n}\n\n.step-reward-delta {\n font-size: 11px;\n color: var(--text-dim);\n padding-left: 32px;\n}\n\n.step-reward-delta .delta-positive {\n color: var(--progress);\n}\n\n.step-reward-delta .delta-negative {\n color: #ef4444;\n}\n\n/* ─── REWARD 
PANEL ─── */\n.reward-panel {\n padding: 12px 16px;\n border-bottom: 1px solid var(--border);\n flex-shrink: 0;\n}\n\n.reward-row {\n display: flex;\n align-items: center;\n gap: 8px;\n margin-bottom: 6px;\n}\n\n.reward-row:last-child {\n margin-bottom: 0;\n}\n\n.reward-label {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 500;\n letter-spacing: 0.06em;\n color: var(--text-dim);\n width: 72px;\n flex-shrink: 0;\n text-transform: uppercase;\n}\n\n.reward-track {\n flex: 1;\n height: 8px;\n background: var(--bg);\n border: 1px solid var(--border);\n overflow: hidden;\n}\n\n.reward-bar {\n height: 100%;\n transition: width 0.4s ease;\n}\n\n.reward-value {\n font-family: var(--font-display);\n font-size: 11px;\n font-weight: 500;\n color: var(--text-primary);\n width: 36px;\n text-align: right;\n flex-shrink: 0;\n}\n\n.reward-value.dim {\n color: var(--text-dim);\n}\n\n.reward-divider {\n height: 1px;\n background: var(--border);\n margin: 6px 0;\n}\n\n/* ─── INFO BADGES ─── */\n.info-badges {\n padding: 12px 16px;\n display: flex;\n flex-direction: column;\n gap: 8px;\n}\n\n.info-row {\n display: flex;\n align-items: center;\n justify-content: space-between;\n gap: 8px;\n}\n\n.info-key {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 500;\n letter-spacing: 0.06em;\n color: var(--text-dim);\n text-transform: uppercase;\n}\n\n.info-val {\n font-family: var(--font-display);\n font-size: 11px;\n font-weight: 700;\n color: var(--text-primary);\n}\n\n.info-val.bool-true {\n color: var(--progress);\n}\n\n.info-val.bool-false {\n color: #ef4444;\n}\n\n.info-val.dim {\n color: var(--text-dim);\n}\n\n/* ─── TARGET SELECTOR ─── */\n.target-selector {\n display: flex;\n align-items: center;\n gap: 8px;\n}\n\n.target-selector-label {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 500;\n letter-spacing: 0.10em;\n color: var(--text-dim);\n text-transform: uppercase;\n white-space: nowrap;\n}\n\n.target-select {\n 
background: var(--surface-2);\n border: 1px solid var(--border-bright);\n color: var(--text-primary);\n font-family: var(--font-display);\n font-size: 11px;\n padding: 4px 8px;\n outline: none;\n cursor: pointer;\n min-width: 180px;\n}\n\n.target-select:focus {\n border-color: var(--valley);\n}\n\noptgroup {\n background: var(--surface);\n color: var(--text-dim);\n font-family: var(--font-display);\n font-size: 10px;\n}\n\noption {\n background: var(--surface-2);\n color: var(--text-primary);\n font-family: var(--font-display);\n}\n\n/* ─── PLAYER CONTROLS ─── */\n.player-controls {\n display: flex;\n align-items: center;\n gap: 6px;\n flex-shrink: 0;\n}\n\n.ctrl-btn {\n background: var(--surface-2);\n border: 1px solid var(--border-bright);\n color: var(--text-primary);\n font-family: var(--font-display);\n font-size: 11px;\n font-weight: 500;\n padding: 4px 10px;\n cursor: pointer;\n white-space: nowrap;\n line-height: 1.4;\n letter-spacing: 0.04em;\n transition: background 0.1s, border-color 0.1s;\n}\n\n.ctrl-btn:hover:not(:disabled) {\n background: var(--surface);\n border-color: var(--text-dim);\n}\n\n.ctrl-btn:disabled {\n opacity: 0.35;\n cursor: not-allowed;\n}\n\n.ctrl-btn.play {\n border-color: var(--valley);\n color: var(--valley);\n}\n\n.ctrl-btn.play:hover:not(:disabled) {\n background: rgba(56, 189, 248, 0.1);\n}\n\n.ctrl-step-display {\n font-family: var(--font-display);\n font-size: 11px;\n color: var(--text-dim);\n padding: 4px 8px;\n border: 1px solid var(--border);\n background: var(--bg);\n white-space: nowrap;\n min-width: 72px;\n text-align: center;\n}\n\n/* ─── LOADING / ERROR ─── */\n.app-overlay {\n position: fixed;\n inset: 0;\n display: flex;\n align-items: center;\n justify-content: center;\n background: var(--bg);\n z-index: 100;\n}\n\n.overlay-message {\n font-family: var(--font-display);\n font-size: 13px;\n letter-spacing: 0.1em;\n color: var(--text-dim);\n display: flex;\n align-items: center;\n gap: 12px;\n}\n\n.pulse-dot {\n 
width: 8px;\n height: 8px;\n border-radius: 50%;\n background: var(--valley);\n animation: pulse 1.2s ease-in-out infinite;\n}\n\n@keyframes pulse {\n 0%, 100% { opacity: 0.2; transform: scale(0.8); }\n 50% { opacity: 1; transform: scale(1); }\n}\n\n/* ─── MISC ─── */\n.episode-loading {\n display: flex;\n align-items: center;\n justify-content: center;\n gap: 8px;\n padding: 12px 16px;\n font-family: var(--font-display);\n font-size: 11px;\n color: var(--text-dim);\n letter-spacing: 0.08em;\n}\n"],"names":[],"sourceRoot":""}
build/static/js/main.b94094b6.js ADDED
The diff for this file is too large to render. See raw diff
 
build/static/js/main.b94094b6.js.LICENSE.txt ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license React
3
+ * react-dom-client.production.js
4
+ *
5
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
6
+ *
7
+ * This source code is licensed under the MIT license found in the
8
+ * LICENSE file in the root directory of this source tree.
9
+ */
10
+
11
+ /**
12
+ * @license React
13
+ * react-dom.production.js
14
+ *
15
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
16
+ *
17
+ * This source code is licensed under the MIT license found in the
18
+ * LICENSE file in the root directory of this source tree.
19
+ */
20
+
21
+ /**
22
+ * @license React
23
+ * react-jsx-runtime.production.js
24
+ *
25
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
26
+ *
27
+ * This source code is licensed under the MIT license found in the
28
+ * LICENSE file in the root directory of this source tree.
29
+ */
30
+
31
+ /**
32
+ * @license React
33
+ * react.production.js
34
+ *
35
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
36
+ *
37
+ * This source code is licensed under the MIT license found in the
38
+ * LICENSE file in the root directory of this source tree.
39
+ */
40
+
41
+ /**
42
+ * @license React
43
+ * scheduler.production.js
44
+ *
45
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
46
+ *
47
+ * This source code is licensed under the MIT license found in the
48
+ * LICENSE file in the root directory of this source tree.
49
+ */
build/static/js/main.b94094b6.js.map ADDED
The diff for this file is too large to render. See raw diff
 
docs/optigami_handoff.md ADDED
@@ -0,0 +1,767 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # OrigamiRL — OpenEnv Hackathon Handoff Document
2
+
3
+ ## TL;DR
4
+
5
+ Build the **first multi-turn RL environment where an LLM learns to generate origami folding instructions**, verified by a computational origami simulator. Target the OpenEnv Hackathon (March 7-8, 2026, SF — $100K+ in prizes). Use OpenEnv spec + Unsloth GRPO for training. Dense verifiable rewards from origami geometry theorems (Kawasaki, Maekawa). No learned reward model needed.
6
+
7
+ ---
8
+
9
+ ## Hackathon Context
10
+
11
+ - **Event:** OpenEnv Hackathon SF, hosted by Cerebral Valley + Shack15 + Meta/PyTorch
12
+ - **Date:** March 7-8, 2026 (happening NOW)
13
+ - **Prize:** $100K+ cash
14
+ - **Teams:** Up to 4 people
15
+ - **Format:** Build RL environments, post-train a base model
16
+
17
+ ### Judging Criteria
18
+
19
+ | Category | Weight | What Matters |
20
+ |----------|--------|-------------|
21
+ | Environment Innovation | 40% | Novel, creative, challenging. Does it meaningfully test agent behavior? |
22
+ | Storytelling | 30% | Clear problem explanation, engaging demo, easy to follow |
23
+ | Training Script Showing Improvement | 20% | Observable reward curves, before/after behavior |
24
+ | Reward and Training Pipeline Setup | 10% | Coherent reward logic, meaningful improvement in inference |
25
+
26
+ ### Key Sponsors to Impress
27
+
28
+ - **Meta/PyTorch** — OpenEnv creators, want environments using their spec
29
+ - **Unsloth AI** — GRPO training infra, ART (Agent Reinforcement Trainer). USE THEIR TOOLS.
30
+ - **OpenPipe** — ART trainer (frontend/backend split for GRPO). Also use.
31
+ - **Patronus AI** — Building "generative simulators" (auto-scaling RL environments). They care about curriculum difficulty scaling and verifiable rewards.
32
+ - **Snorkel AI** — "2026 is the year of environments." They care about data quality and environment diversity.
33
+ - **Hugging Face** — OpenEnv Hub, want environments deployed there
34
+ - **Scale AI / Mercor** — Agent evaluation, structured task environments
35
+
36
+ ---
37
+
38
+ ## The Pitch (for judges)
39
+
40
+ > "Spatial reasoning is the next frontier for LLM training — NeurIPS 2025 papers like OrigamiSpace showed that even GPT-5 fails at multi-step origami reasoning. But those are benchmarks, not training environments. We built OrigamiRL: the first multi-turn RL environment where an LLM agent learns to fold paper by outputting instructions, receiving geometric feedback, and improving through GRPO. Our reward function is fully verifiable — fold validity is checked against computational origami axioms, not an LLM judge. We built it on OpenEnv + Unsloth with a natural curriculum from single folds to full cranes."
41
+
42
+ ---
43
+
44
+ ## Prior Work (What Exists, Where the Gaps Are)
45
+
46
+ ### 1. OrigamiSpace (NeurIPS 2025 Spotlight)
47
+
48
+ - **Paper:** https://arxiv.org/abs/2511.18450
49
+ - **What it is:** Benchmark with 350 origami data instances (CP diagrams, folding processes, folded shapes). 4 evaluation tasks: Pattern Prediction, Multi-step Spatial Reasoning, Spatial Relationship Prediction, End-to-End CP Code Generation.
50
+ - **Their compiler:** Outputs detailed flattened diagrams with crease locations and stacking relationships, supports interactive simulation with MLLMs, provides comprehensive error feedback. Checks: syntax validity, geometric foldability, no self-intersections, Kawasaki's theorem, Maekawa's theorem.
51
+ - **Their reward metrics for code gen:** Hausdorff distance (shape similarity), dihedral angle distribution, bounding box aspect ratios, constraint satisfaction.
52
+ - **Difficulty levels:** Easy (3-9 steps), Medium (10-19 steps), Hard (20-30 steps)
53
+ - **Gap:** Single-turn only (LLM generates complete CP code in one shot). They mention RL exploration but it's not the focus. No multi-turn sequential folding.
54
+
55
+ ### 2. GamiBench (Dec 2025)
56
+
57
+ - **Paper:** https://arxiv.org/abs/2512.22207
58
+ - **What it is:** 186 regular + 186 impossible 2D crease patterns with 3D folded shapes from 6 viewpoints. 3 VQA tasks.
59
+ - **Gap:** Evaluation-only, no training. Tests single-step spatial understanding.
60
+
61
+ ### 3. SpatialThinker (NeurIPS 2025)
62
+
63
+ - **Paper:** https://arxiv.org/abs/2511.07403
64
+ - **What it is:** 3D-aware MLLM trained with RL using dense spatial rewards. Constructs scene graphs. Multi-objective reward with lexicographic gating.
65
+ - **Key architecture to steal:** Dense reward design with lexicographic ordering — format → count → accuracy → spatial. Nearly doubled RL training gains vs sparse rewards. Only needed 7K training samples with GRPO.
66
+ - **Gap:** Static scene understanding (objects on a table), not sequential physical transformations.
67
+
68
+ ### 4. rigid-origami Gym (IJCAI 2023)
69
+
70
+ - **Repo:** https://github.com/belalugaX/rigid-origami
71
+ - **Paper:** "Automating Rigid Origami Design" (https://arxiv.org/abs/2211.13219)
72
+ - **What it is:** Gym environment where agent constructs crease pattern graphs on a board. Sparse rewards. Foldability validated by triangle intersection tests + kinematic rigidity model. Game terminates on non-foldable states.
73
+ - **Gap:** Classical RL agents (discrete grid actions), NOT LLMs generating text. Rigid-origami tessellations only, not traditional origami. No natural language.
74
+
75
+ ### 5. The Unique Gap We Fill
76
+
77
+ Nobody has built a model that reasons about **sequential 2D-to-3D geometric transformations with physical constraints** through **natural language instructions** in a **multi-turn RL training loop**. Origami is uniquely hard because it requires tracking how a flat sheet's topology changes through a sequence of folds — mental rotation, spatial visualization, and perspective-taking all at once.
78
+
79
+ ---
80
+
81
+ ## Environment Design
82
+
83
+ ### Architecture Overview
84
+
85
+ ```
86
+ +---------------------------------------------------+
87
+ | OpenEnv Server |
88
+ | +-----------+ +----------+ +--------------+ |
89
+ | | State | | Action | | Reward | |
90
+ | | (FOLD JSON| | (LLM | | (Dense, | |
91
+ | | + target)| | output) | | verifiable) | |
92
+ | +-----------+ +----------+ +--------------+ |
93
+ | | | | |
94
+ | v v v |
95
+ | +-----------------------------------------------+|
96
+ | | Paper Geometry Engine (Python) ||
97
+ | | - Polygon state (Shapely) ||
98
+ | | - Fold operations (reflection across line) ||
99
+ | | - Kawasaki/Maekawa constraint checks ||
100
+ | | - Layer tracking ||
101
+ | | - FOLD format import/export ||
102
+ | +-----------------------------------------------+|
103
+ | | |
104
+ | v |
105
+ | +-----------------------------------------------+|
106
+ | | Three.js Visualizer (Demo only) ||
107
+ | | - 3D fold animation ||
108
+ | | - Strain heatmap ||
109
+ | | - Instruction stream ||
110
+ | +-----------------------------------------------+|
111
+ +---------------------------------------------------+
112
+ | ^
113
+ v |
114
+ +---------------------------------------------------+
115
+ | Unsloth ART / GRPO Trainer |
116
+ | - Qwen2.5-VL-7B or Qwen3-4B base model |
117
+ | - LoRA/QLoRA for efficient training |
118
+ | - Multi-turn rollouts |
119
+ +---------------------------------------------------+
120
+ ```
121
+
122
+ ### OpenEnv Spec Compliance
123
+
124
+ Must implement these APIs:
125
+
126
+ ```python
127
+ class OrigamiEnv:
128
+ async def reset() -> Observation # New episode: flat paper + target
129
+ async def step(action) -> (Observation, reward, done, info)
130
+ async def state() -> State # Current paper geometry
131
+ async def close() # Cleanup
132
+ ```
133
+
134
+ OpenEnv repo: https://github.com/meta-pytorch/OpenEnv
135
+ Install: `pip install -e .` then `openenv init origami_env`
136
+
137
+ ### State Space
138
+
139
+ ```python
140
+ @dataclass
141
+ class OrigamiState:
142
+ # Current paper geometry
143
+ vertices: List[Tuple[float, float]] # 2D vertex positions
144
+ edges: List[Tuple[int, int]] # Edge connectivity
145
+ edges_assignment: List[str] # 'M', 'V', 'B', 'F' (mountain/valley/boundary/flat)
146
+ edges_foldAngle: List[float] # -180 to 180 degrees
147
+ faces: List[List[int]] # Face vertex indices
148
+ layer_order: List[List[int]] # Face stacking order
149
+
150
+ # Episode context
151
+ target_crease_pattern: dict # Target FOLD JSON
152
+ target_shape_image: Optional[np.ndarray] # Target folded shape (for multimodal)
153
+ instruction_history: List[str] # Previous instructions
154
+ step_count: int
155
+ max_steps: int
156
+ ```
157
+
158
+ This maps directly to the **FOLD format** (JSON-based, used by all origami software):
159
+
160
+ ```json
161
+ {
162
+ "vertices_coords": [[0,0], [1,0], [1,1], [0,1]],
163
+ "edges_vertices": [[0,1], [1,2], [2,3], [3,0]],
164
+ "edges_assignment": ["B", "B", "B", "B"],
165
+ "edges_foldAngle": [0, 0, 0, 0],
166
+ "faces_vertices": [[0, 1, 2, 3]]
167
+ }
168
+ ```
169
+
170
+ FOLD spec: https://github.com/edemaine/fold
171
+ FOLD JS library: https://edemaine.github.io/fold/
172
+
173
+ ### Action Space
174
+
175
+ The LLM outputs a JSON action:
176
+
177
+ ```json
178
+ {
179
+ "instruction": "Fold the top edge down to meet the bottom edge",
180
+ "fold_line": [[0, 0.5], [1, 0.5]],
181
+ "fold_angle": -180,
182
+ "assignment": "V"
183
+ }
184
+ ```
185
+
186
+ The `instruction` field is natural language (what we're training the model to produce well). The geometric fields are the verifiable representation. During training, the model outputs both; for the final demo, the NL instruction is the star.
187
+
188
+ Alternative simpler action (for early iterations):
189
+
190
+ ```json
191
+ {
192
+ "instruction": "Valley fold along the horizontal center line",
193
+ "fold_type": "valley",
194
+ "fold_axis": "horizontal",
195
+ "fold_position": 0.5
196
+ }
197
+ ```
198
+
199
+ ### Reward Function — Dense, Multi-Objective, Lexicographically Gated
200
+
201
+ Inspired by SpatialThinker's design. Rewards are computed in order; later rewards only apply if earlier gates pass.
202
+
203
+ ```python
204
+ def compute_reward(state, action, new_state, target) -> dict:
205
+ rewards = {}
206
+
207
+ # LEVEL 1: Format (gate for everything else)
208
+ # Does the output parse into a valid fold operation?
209
+ rewards['format'] = 1.0 if parseable(action) else 0.0
210
+ if rewards['format'] == 0:
211
+ return rewards # Stop here
212
+
213
+ # LEVEL 2: Local Geometric Validity
214
+ # Kawasaki's theorem: sector angles at each interior vertex sum to 2pi
215
+ kawasaki_valid = check_kawasaki(new_state)
216
+ # Maekawa's theorem: |M - V| = 2 at each interior vertex
217
+ maekawa_valid = check_maekawa(new_state)
218
+ # No self-intersection
219
+ no_intersection = check_no_self_intersection(new_state)
220
+ rewards['validity'] = (kawasaki_valid + maekawa_valid + no_intersection) / 3.0
221
+ if rewards['validity'] < 0.5:
222
+ return rewards # Stop here
223
+
224
+ # LEVEL 3: Physical Feasibility
225
+ # Can this fold actually be performed given layer stack?
226
+ layer_consistent = check_layer_ordering(new_state)
227
+ fold_achievable = check_fold_angle_feasible(new_state)
228
+ rewards['feasibility'] = (layer_consistent + fold_achievable) / 2.0
229
+
230
+ # LEVEL 4: Progress Toward Target (Dense)
231
+ # Crease pattern graph similarity
232
+ cp_similarity = crease_pattern_similarity(new_state, target)
233
+ # Fold angle distribution match
234
+ angle_similarity = fold_angle_distribution_match(new_state, target)
235
+ # Bounding box aspect ratio match
236
+ bbox_similarity = bounding_box_similarity(new_state, target)
237
+ rewards['progress'] = 0.4 * cp_similarity + 0.4 * angle_similarity + 0.2 * bbox_similarity
238
+
239
+ # LEVEL 5: Completion Bonus
240
+ if shape_matches_target(new_state, target, tolerance=0.05):
241
+ rewards['completion'] = 10.0
242
+
243
+ # LEVEL 6: Efficiency
244
+ rewards['efficiency'] = -0.01 # Small step penalty to encourage fewer folds
245
+
246
+ # Total
247
+ rewards['total'] = (
248
+ 0.1 * rewards['format'] +
249
+ 0.2 * rewards['validity'] +
250
+ 0.1 * rewards['feasibility'] +
251
+ 0.5 * rewards['progress'] +
252
+ rewards.get('completion', 0) +
253
+ rewards['efficiency']
254
+ )
255
+ return rewards
256
+ ```
257
+
258
+ ### Key Origami Theorems for Verification
259
+
260
+ These are the verifiable constraints — the "unit tests" of origami:
261
+
262
+ 1. **Kawasaki's Theorem:** At any interior vertex of a flat-foldable crease pattern, the alternating sum of sector angles equals zero (equivalently, they sum to 2pi on each side). NECESSARY condition for flat-foldability.
263
+
264
+ 2. **Maekawa's Theorem:** At any interior vertex, the number of mountain folds minus valley folds equals +/-2. |M - V| = 2.
265
+
266
+ 3. **No self-intersection:** Faces cannot penetrate each other during folding.
267
+
268
+ 4. **Euler's formula for planar graphs:** V - E + F = 2 (sanity check on graph structure).
269
+
270
+ 5. **Huzita-Hatori axioms:** The 7 axioms defining all possible single-fold operations (point-to-point, point-to-line, line-to-line, etc.). These define the VALID action space.
271
+
272
+ ### Curriculum Design
273
+
274
+ | Level | Folds | Examples | Complexity |
275
+ |-------|-------|----------|-----------|
276
+ | 1 | 1 | Valley fold in half, mountain fold corner | Single fold validity |
277
+ | 2 | 2-3 | Paper airplane nose, triangle fold | Sequential dependency |
278
+ | 3 | 4-6 | Simple boat, fortune teller | Multi-step with symmetry |
279
+ | 4 | 7-12 | Paper airplane (full), jumping frog | Longer horizon planning |
280
+ | 5 | 13-20 | Crane, lily | Complex spatial tracking |
281
+
282
+ For the hackathon, focus on Levels 1-3. Even showing reward improvement on Level 1-2 is a strong result.
283
+
284
+ ---
285
+
286
+ ## Core Implementation: Python Geometry Engine
287
+
288
+ This is the MOST IMPORTANT piece. Pure Python, no JS dependencies.
289
+
290
+ ```python
291
+ import numpy as np
292
+ from shapely.geometry import Polygon, LineString, MultiPolygon
293
+ from shapely.ops import split
294
+ from typing import List, Tuple, Dict
295
+ import json
296
+
297
+ class PaperState:
298
+ """Represents the current state of the origami paper."""
299
+
300
+ def __init__(self, size: float = 1.0):
301
+ # Start with a unit square
302
+ self.regions = [Polygon([(0,0), (size,0), (size,size), (0,size)])]
303
+ self.fold_history = []
304
+ self.crease_lines = []
305
+ self.crease_assignments = [] # 'M' or 'V'
306
+ self.crease_angles = []
307
+ self.layer_order = [0] # Stack order of regions
308
+
309
+ def apply_fold(self, fold_line: LineString, angle: float, assignment: str) -> dict:
310
+ """
311
+ Apply a fold operation. Returns dict with validity info.
312
+ fold_line: Shapely LineString defining the fold axis
313
+ angle: fold angle in degrees (-180 to 180)
314
+ assignment: 'M' (mountain) or 'V' (valley)
315
+ """
316
+ result = {'valid': True, 'errors': []}
317
+
318
+ # 1. Split regions by fold line
319
+ new_regions = []
320
+ for region in self.regions:
321
+ if fold_line.intersects(region):
322
+ parts = split(region, fold_line)
323
+ new_regions.extend(parts.geoms)
324
+ else:
325
+ new_regions.append(region)
326
+
327
+ # 2. Determine which side folds (based on assignment)
328
+ folding_side = []
329
+ staying_side = []
330
+ for region in new_regions:
331
+ centroid = region.centroid
332
+ side = self._point_side(centroid, fold_line)
333
+ if side > 0:
334
+ folding_side.append(region)
335
+ else:
336
+ staying_side.append(region)
337
+
338
+ # 3. Reflect folding regions across fold line
339
+ reflected = [self._reflect_polygon(r, fold_line) for r in folding_side]
340
+
341
+ # 4. Update state
342
+ self.regions = staying_side + reflected
343
+ self.crease_lines.append(fold_line)
344
+ self.crease_assignments.append(assignment)
345
+ self.crease_angles.append(angle)
346
+ self.fold_history.append({
347
+ 'line': list(fold_line.coords),
348
+ 'angle': angle,
349
+ 'assignment': assignment
350
+ })
351
+
352
+ # 5. Update layer order
353
+ self._update_layer_order(staying_side, reflected)
354
+
355
+ return result
356
+
357
+ def _reflect_polygon(self, poly: Polygon, line: LineString) -> Polygon:
358
+ """Reflect a polygon across a line."""
359
+ coords = list(poly.exterior.coords)
360
+ reflected_coords = [self._reflect_point(p, line) for p in coords]
361
+ return Polygon(reflected_coords)
362
+
363
+ def _reflect_point(self, point: tuple, line: LineString) -> tuple:
364
+ """Reflect a point across a line."""
365
+ p = np.array(point[:2])
366
+ l1 = np.array(line.coords[0])
367
+ l2 = np.array(line.coords[1])
368
+ d = l2 - l1
369
+ d = d / np.linalg.norm(d)
370
+ # Reflection formula: p' = p - 2(p-l1).n * n where n is normal to line
371
+ n = np.array([-d[1], d[0]])
372
+ v = p - l1
373
+ return tuple(p - 2 * np.dot(v, n) * n)
374
+
375
+ def _point_side(self, point, line: LineString) -> float:
376
+ """Returns positive if point is on left side of line, negative if right."""
377
+ p = np.array([point.x, point.y])
378
+ l1 = np.array(line.coords[0])
379
+ l2 = np.array(line.coords[1])
380
+ return float(np.cross(l2 - l1, p - l1))
381
+
382
+ def _update_layer_order(self, staying, reflected):
383
+ """Update the layer stacking order after a fold."""
384
+ self.layer_order = list(range(len(staying))) + \
385
+ list(range(len(staying), len(staying) + len(reflected)))
386
+
387
+ def to_fold_json(self) -> dict:
388
+ """Export current state as FOLD format JSON."""
389
+ vertices = set()
390
+ for line in self.crease_lines:
391
+ for coord in line.coords:
392
+ vertices.add(tuple(round(c, 10) for c in coord))
393
+ # Add boundary vertices
394
+ for region in self.regions:
395
+ for coord in region.exterior.coords:
396
+ vertices.add(tuple(round(c, 10) for c in coord[:2]))
397
+
398
+ vertices = sorted(list(vertices))
399
+ vertex_map = {v: i for i, v in enumerate(vertices)}
400
+
401
+ edge_set = set()
402
+ edges_list = []
403
+ assignments_list = []
404
+ angles_list = []
405
+
406
+ # Add crease edges
407
+ for i, line in enumerate(self.crease_lines):
408
+ c = [tuple(round(x, 10) for x in coord) for coord in line.coords]
409
+ edge = tuple(sorted([vertex_map[c[0]], vertex_map[c[1]]]))
410
+ if edge not in edge_set:
411
+ edge_set.add(edge)
412
+ edges_list.append(list(edge))
413
+ assignments_list.append(self.crease_assignments[i])
414
+ angles_list.append(self.crease_angles[i])
415
+
416
+ return {
417
+ 'vertices_coords': [list(v) for v in vertices],
418
+ 'edges_vertices': edges_list,
419
+ 'edges_assignment': assignments_list,
420
+ 'edges_foldAngle': angles_list,
421
+ }
422
+
423
+
424
+ class OrigamiVerifier:
425
+ """Verifiable reward functions based on origami theorems."""
426
+
427
+ @staticmethod
428
+ def check_kawasaki(state: PaperState) -> bool:
429
+ """Kawasaki's theorem: alternating sum of angles at each interior vertex = 0."""
430
+ fold_json = state.to_fold_json()
431
+ vertices = fold_json['vertices_coords']
432
+ edges = fold_json['edges_vertices']
433
+
434
+ for v_idx in range(len(vertices)):
435
+ v = vertices[v_idx]
436
+ incident_edges = [e for e in edges if v_idx in e]
437
+ if len(incident_edges) < 4:
438
+ continue # Need degree-4+ for Kawasaki
439
+
440
+ # Calculate sector angles
441
+ angles = []
442
+ for e in incident_edges:
443
+ other = e[1] if e[0] == v_idx else e[0]
444
+ other_v = vertices[other]
445
+ angle = np.arctan2(other_v[1] - v[1], other_v[0] - v[0])
446
+ angles.append(angle)
447
+
448
+ angles.sort()
449
+ sector_angles = []
450
+ for i in range(len(angles) - 1):
451
+ sector_angles.append(angles[i+1] - angles[i])
452
+ sector_angles.append(2*np.pi - (angles[-1] - angles[0]))
453
+
454
+ # Kawasaki: alternating sum should be ~0
455
+ if len(sector_angles) >= 4:
456
+ alt_sum = sum(sector_angles[::2]) - sum(sector_angles[1::2])
457
+ if abs(alt_sum) > 0.01:
458
+ return False
459
+ return True
460
+
461
+ @staticmethod
462
+ def check_maekawa(state: PaperState) -> bool:
463
+ """Maekawa's theorem: |M - V| = 2 at each interior vertex."""
464
+ fold_json = state.to_fold_json()
465
+ vertices = fold_json['vertices_coords']
466
+ edges = fold_json['edges_vertices']
467
+ assignments = fold_json['edges_assignment']
468
+
469
+ for v_idx in range(len(vertices)):
470
+ incident = [(i, e) for i, e in enumerate(edges) if v_idx in e]
471
+ m_count = sum(1 for i, _ in incident if i < len(assignments) and assignments[i] == 'M')
472
+ v_count = sum(1 for i, _ in incident if i < len(assignments) and assignments[i] == 'V')
473
+
474
+ if m_count + v_count >= 4: # Interior vertex with folds
475
+ if abs(m_count - v_count) != 2:
476
+ return False
477
+ return True
478
+
479
+ @staticmethod
480
+ def crease_pattern_similarity(state: PaperState, target_fold_json: dict) -> float:
481
+ """Compare current crease pattern to target. Returns 0-1 similarity."""
482
+ current = state.to_fold_json()
483
+
484
+ n_current = len(current.get('edges_vertices', []))
485
+ n_target = len(target_fold_json.get('edges_vertices', []))
486
+
487
+ if n_target == 0:
488
+ return 1.0 if n_current == 0 else 0.0
489
+
490
+ edge_count_sim = 1.0 - abs(n_current - n_target) / max(n_target, 1)
491
+ edge_count_sim = max(0, edge_count_sim)
492
+
493
+ current_assignments = current.get('edges_assignment', [])
494
+ target_assignments = target_fold_json.get('edges_assignment', [])
495
+
496
+ c_m = current_assignments.count('M')
497
+ c_v = current_assignments.count('V')
498
+ t_m = target_assignments.count('M')
499
+ t_v = target_assignments.count('V')
500
+
501
+ total = max(t_m + t_v, 1)
502
+ assign_sim = 1.0 - (abs(c_m - t_m) + abs(c_v - t_v)) / (2 * total)
503
+ assign_sim = max(0, assign_sim)
504
+
505
+ return 0.5 * edge_count_sim + 0.5 * assign_sim
506
+ ```
507
+
508
+ ---
509
+
510
+ ## OpenEnv Environment Wrapper
511
+
512
+ ```python
513
+ # origami_env/server.py
514
+ from openenv.core import Environment
515
+ from paper_engine import PaperState, OrigamiVerifier
516
+ from shapely.geometry import LineString
517
+ import json
518
+
519
+ class OrigamiEnvironment(Environment):
520
+
521
+ def __init__(self, targets_dir="targets/", max_steps=20):
522
+ self.targets_dir = targets_dir
523
+ self.max_steps = max_steps
524
+ self.paper = None
525
+ self.target = None
526
+ self.step_count = 0
527
+
528
+ async def reset(self, target_id=None):
529
+ self.paper = PaperState(size=1.0)
530
+ self.target = self._load_target(target_id)
531
+ self.step_count = 0
532
+ return self._get_observation()
533
+
534
+ async def step(self, action):
535
+ self.step_count += 1
536
+
537
+ # Parse action
538
+ try:
539
+ fold_line = LineString(action['fold_line'])
540
+ angle = action['fold_angle']
541
+ assignment = action['assignment']
542
+ except (KeyError, Exception):
543
+ reward = {'format': 0, 'total': -0.1}
544
+ return self._get_observation(), reward, False, {'error': 'parse_failed'}
545
+
546
+ # Apply fold
547
+ result = self.paper.apply_fold(fold_line, angle, assignment)
548
+
549
+ # Compute rewards
550
+ reward = self._compute_reward(result)
551
+
552
+ # Check termination
553
+ done = (
554
+ self.step_count >= self.max_steps or
555
+ reward.get('completion', 0) > 0
556
+ )
557
+
558
+ return self._get_observation(), reward, done, {}
559
+
560
+ async def state(self):
561
+ return {
562
+ 'paper': self.paper.to_fold_json(),
563
+ 'target': self.target,
564
+ 'step': self.step_count,
565
+ 'fold_history': self.paper.fold_history
566
+ }
567
+
568
+ def _compute_reward(self, fold_result):
569
+ rewards = {}
570
+ rewards['format'] = 1.0
571
+
572
+ kawasaki = OrigamiVerifier.check_kawasaki(self.paper)
573
+ maekawa = OrigamiVerifier.check_maekawa(self.paper)
574
+ rewards['validity'] = (float(kawasaki) + float(maekawa)) / 2.0
575
+
576
+ rewards['progress'] = OrigamiVerifier.crease_pattern_similarity(
577
+ self.paper, self.target
578
+ )
579
+
580
+ if rewards['progress'] > 0.95:
581
+ rewards['completion'] = 10.0
582
+
583
+ rewards['efficiency'] = -0.01
584
+
585
+ rewards['total'] = (
586
+ 0.1 * rewards['format'] +
587
+ 0.2 * rewards['validity'] +
588
+ 0.6 * rewards['progress'] +
589
+ rewards.get('completion', 0) +
590
+ rewards['efficiency']
591
+ )
592
+ return rewards
593
+
594
+ def _get_observation(self):
595
+ return {
596
+ 'paper_state': self.paper.to_fold_json(),
597
+ 'target': self.target,
598
+ 'step': self.step_count,
599
+ 'instruction_history': [str(f['line']) for f in self.paper.fold_history]
600
+ }
601
+
602
+ def _load_target(self, target_id):
603
+ if target_id:
604
+ with open(f"{self.targets_dir}/{target_id}.fold") as f:
605
+ return json.load(f)
606
+ # Default: simple valley fold in half
607
+ return {
608
+ 'vertices_coords': [[0,0], [1,0], [1,1], [0,1], [0,0.5], [1,0.5]],
609
+ 'edges_vertices': [[0,1], [1,2], [2,3], [3,0], [4,5]],
610
+ 'edges_assignment': ['B', 'B', 'B', 'B', 'V'],
611
+ 'edges_foldAngle': [0, 0, 0, 0, -180],
612
+ }
613
+ ```
614
+
615
+ ---
616
+
617
+ ## Training Script (Unsloth GRPO)
618
+
619
+ ```python
620
+ # train.py
621
+ from unsloth import FastLanguageModel
622
+ from trl import GRPOConfig, GRPOTrainer
623
+ import torch
624
+
625
+ # Load model
626
+ model, tokenizer = FastLanguageModel.from_pretrained(
627
+ model_name="unsloth/Qwen2.5-7B-Instruct",
628
+ max_seq_length=4096,
629
+ load_in_4bit=True,
630
+ )
631
+
632
+ # Add LoRA
633
+ model = FastLanguageModel.get_peft_model(
634
+ model,
635
+ r=32,
636
+ target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
637
+ "gate_proj", "up_proj", "down_proj"],
638
+ lora_alpha=32,
639
+ lora_dropout=0,
640
+ use_gradient_checkpointing="unsloth",
641
+ )
642
+
643
+ # Reward function
644
+ def origami_reward(completions, prompts):
645
+ """Compute rewards for a batch of completions."""
646
+ rewards = []
647
+ for completion in completions:
648
+ try:
649
+ action = parse_fold_action(completion)
650
+ paper = PaperState()
651
+ result = paper.apply_fold(action['fold_line'], action['angle'], action['assignment'])
652
+ r = compute_reward(paper, target)
653
+ rewards.append(r['total'])
654
+ except Exception:
655
+ rewards.append(-0.1)
656
+ return rewards
657
+
658
+ # GRPO Config
659
+ config = GRPOConfig(
660
+ output_dir="origami-grpo",
661
+ num_train_epochs=3,
662
+ per_device_train_batch_size=4,
663
+ gradient_accumulation_steps=4,
664
+ learning_rate=5e-6,
665
+ max_completion_length=512,
666
+ num_generations=8,
667
+ temperature=1.0,
668
+ logging_steps=1,
669
+ )
670
+
671
+ dataset = load_origami_prompts()
672
+
673
+ trainer = GRPOTrainer(
674
+ model=model,
675
+ config=config,
676
+ train_dataset=dataset,
677
+ reward_funcs=[origami_reward],
678
+ tokenizer=tokenizer,
679
+ )
680
+
681
+ trainer.train()
682
+ ```
683
+
684
+ ---
685
+
686
+ ## Visualization (Demo Only — Not in Training Loop)
687
+
688
+ ### Options
689
+
690
+ 1. **Origami Simulator** — https://github.com/amandaghassaei/OrigamiSimulator — Three.js, accepts FOLD files, shows folding animation with strain visualization
691
+ 2. **PackCAD** — https://packcad.com/ — Web-based, SVG crease patterns, rigid folding simulation
692
+ 3. **Custom Three.js** — Simpler but more control
693
+
694
+ ### Demo UI Layout
695
+
696
+ ```
697
+ +----------------------+----------------------+
698
+ | Instruction Stream | 3D Fold Viewer |
699
+ | | |
700
+ | Step 1: Valley fold | [Three.js canvas] |
701
+ | along center [OK] | |
702
+ | | Paper animating |
703
+ | Step 2: Fold top | fold by fold |
704
+ | corners to center | |
705
+ | | |
706
+ +----------------------+----------------------+
707
+ | Reward Dashboard |
708
+ | Format: ========== 1.0 |
709
+ | Validity: ========.. 0.8 |
710
+ | Progress: ======.... 0.6 |
711
+ | Total: =======... 0.72 |
712
+ | |
713
+ | [Reward curve over training steps] |
714
+ +----------------------------------------------+
715
+ ```
716
+
717
+ ---
718
+
719
+ ## Key Libraries and Resources
720
+
721
+ | Tool | Purpose | Link |
722
+ |------|---------|------|
723
+ | OpenEnv | Environment framework | https://github.com/meta-pytorch/OpenEnv |
724
+ | Unsloth | GRPO training | https://github.com/unslothai/unsloth |
725
+ | OpenPipe ART | Multi-turn RL trainer | https://github.com/OpenPipe/ART |
726
+ | FOLD format | Origami data structure | https://github.com/edemaine/fold |
727
+ | Rabbit Ear | JS origami library | https://github.com/rabbit-ear/rabbit-ear |
728
+ | Origami Simulator | 3D visualization | https://github.com/amandaghassaei/OrigamiSimulator |
729
+ | PackCAD | Folding simulation | https://packcad.com/ |
730
+ | Shapely | Python geometry | pip install shapely |
731
+ | rigid-origami gym | Reference gym env | https://github.com/belalugaX/rigid-origami |
732
+
733
+ ### Papers to Cite
734
+
735
+ - OrigamiSpace: https://arxiv.org/abs/2511.18450
736
+ - GamiBench: https://arxiv.org/abs/2512.22207
737
+ - SpatialThinker: https://arxiv.org/abs/2511.07403
738
+ - Automating Rigid Origami Design: https://arxiv.org/abs/2211.13219
739
+ - FOLD format spec: https://github.com/edemaine/fold/blob/main/doc/spec.md
740
+
741
+ ---
742
+
743
+ ## Priority Build Order
744
+
745
+ 1. **Python geometry engine** — PaperState class with fold operations and FOLD export
746
+ 2. **Verifier functions** — Kawasaki, Maekawa, similarity metrics
747
+ 3. **OpenEnv wrapper** — step/reset/state API
748
+ 4. **Simple targets** — Hand-create 5-10 Level 1-2 targets as .fold files
749
+ 5. **Training script** — Wire up Unsloth GRPO with reward function
750
+ 6. **Run training** — Even on small model, get reward curves
751
+ 7. **Three.js visualizer** — For demo only, not in training loop
752
+ 8. **Before/after demo** — Show base model vs trained model outputs
753
+ 9. **Polish presentation narrative**
754
+
755
+ ---
756
+
757
+ ## Narrative for Judges
758
+
759
+ **The story arc:**
760
+
761
+ 1. "LLMs are great at text but terrible at spatial reasoning"
762
+ 2. "Origami is the perfect testbed — it's sequential, physical, and verifiable"
763
+ 3. "NeurIPS 2025 showed even GPT-5 fails at origami benchmarks, but nobody built a TRAINING environment"
764
+ 4. "We built OrigamiRL — the first multi-turn RL environment for origami instruction generation"
765
+ 5. "Our rewards come from math theorems, not vibes — Kawasaki's theorem is our unit test"
766
+ 6. "Watch the model go from generating paper-tearing nonsense to valid fold sequences"
767
+ 7. "This generalizes to any domain where LLMs need to output structured physical instructions"
engine/fold_engine.py DELETED
@@ -1,207 +0,0 @@
1
- """
2
- Fold execution engine.
3
-
4
- Applies fold operations (valley / mountain) to a Paper object using
5
- Rodrigues' rotation formula, face splitting, and layer tracking.
6
- """
7
-
8
- from __future__ import annotations
9
-
10
- import math
11
- from typing import Callable
12
-
13
- import numpy as np
14
-
15
- from .paper import Paper
16
-
17
-
18
- # ────────────────────────────────────────────────────────────────────
19
- # Rodrigues' rotation
20
- # ────────────────────────────────────────────────────────────────────
21
-
22
- def _rodrigues_rotate(
23
- points: np.ndarray,
24
- axis_point: np.ndarray,
25
- axis_dir: np.ndarray,
26
- angle_rad: float,
27
- ) -> np.ndarray:
28
- """Rotate *points* (N, 3) around an axis defined by a point and direction
29
- using Rodrigues' rotation formula. Returns rotated points (N, 3)."""
30
- k = axis_dir / (np.linalg.norm(axis_dir) + 1e-30)
31
- translated = points - axis_point
32
- cos_a = math.cos(angle_rad)
33
- sin_a = math.sin(angle_rad)
34
- dot_term = np.dot(translated, k).reshape(-1, 1) * k # (N,1)*(3,) broadcast
35
- rotated = (
36
- translated * cos_a
37
- + np.cross(k, translated) * sin_a
38
- + dot_term * (1.0 - cos_a)
39
- )
40
- return rotated + axis_point
41
-
42
-
43
- # ────────────────────────────────────────────────────────────────────
44
- # Single fold
45
- # ────────────────────────────────────────────────────────────────────
46
-
47
- def apply_fold(
48
- paper: Paper,
49
- fold_dict: dict,
50
- ) -> tuple[Paper, str | None]:
51
- """Apply a single fold to *paper* and return ``(new_paper, error_or_None)``.
52
-
53
- *fold_dict* has the form::
54
-
55
- {
56
- "type": "valley" | "mountain",
57
- "line": {"start": [x, y], "end": [x, y]},
58
- "angle": 0-180,
59
- }
60
-
61
- Steps:
62
- 1. Validate inputs.
63
- 2. Split faces along the fold line.
64
- 3. Determine vertices to rotate (one side of fold line).
65
- 4. Rodrigues' rotation of those vertices.
66
- 5. Update edge assignments for new fold-line edges.
67
- 6. Update fold angles.
68
- 7. Update layer tracking.
69
- """
70
-
71
- # ── 0. parse & validate ─────────────────────────────────────────
72
- fold_type = fold_dict.get("type", "valley")
73
- line = fold_dict.get("line", {})
74
- angle_deg = fold_dict.get("angle", 180)
75
-
76
- if fold_type not in ("valley", "mountain"):
77
- return paper, f"Unknown fold type: {fold_type}"
78
-
79
- try:
80
- start_2d = np.array(line["start"], dtype=np.float64)[:2]
81
- end_2d = np.array(line["end"], dtype=np.float64)[:2]
82
- except (KeyError, TypeError, IndexError) as exc:
83
- return paper, f"Invalid fold line: {exc}"
84
-
85
- if np.linalg.norm(end_2d - start_2d) < 1e-12:
86
- return paper, "Fold line has zero length"
87
-
88
- if not (0 < angle_deg <= 180):
89
- return paper, f"Angle must be in (0, 180], got {angle_deg}"
90
-
91
- # ── 1. deep copy so the original is untouched ───────────────────
92
- new_paper = paper.copy()
93
-
94
- # ── 2. split faces along fold line ──────────────────────────────
95
- try:
96
- fold_edge_ids = new_paper.split_faces_along_line(start_2d, end_2d)
97
- except Exception as exc:
98
- return paper, f"Face split failed: {exc}"
99
-
100
- # ── 3. determine vertices to rotate ─────────────────────────────
101
- rotate_ids = new_paper.get_vertices_on_side(start_2d, end_2d, "positive")
102
-
103
- if not rotate_ids:
104
- # Try the other side — maybe the fold line is at the boundary
105
- rotate_ids = new_paper.get_vertices_on_side(start_2d, end_2d, "negative")
106
- if not rotate_ids:
107
- return paper, "No vertices to rotate — fold line may not intersect paper"
108
-
109
- # ── 4. Rodrigues' rotation ──────────────────────────────────────
110
- sign = 1.0 if fold_type == "valley" else -1.0
111
- angle_rad = sign * math.radians(angle_deg)
112
-
113
- axis_point = np.array([start_2d[0], start_2d[1], 0.0])
114
- axis_dir = np.array([end_2d[0] - start_2d[0], end_2d[1] - start_2d[1], 0.0])
115
-
116
- pts = new_paper.vertices[rotate_ids]
117
- rotated = _rodrigues_rotate(pts, axis_point, axis_dir, angle_rad)
118
- new_paper.vertices[rotate_ids] = rotated
119
-
120
- # ── 5. update edge assignments ──────────────────────────────────
121
- assignment = "V" if fold_type == "valley" else "M"
122
- for eidx in fold_edge_ids:
123
- if eidx < len(new_paper.assignments):
124
- new_paper.assignments[eidx] = assignment
125
-
126
- # ── 6. update fold angles ───────────────────────────────────────
127
- for eidx in fold_edge_ids:
128
- if eidx < len(new_paper.fold_angles):
129
- new_paper.fold_angles[eidx] = angle_deg * sign
130
-
131
- # ── 7. update layer tracking ────────────────────────────────────
132
- # For each pair of faces on opposite sides of the fold line, record
133
- # layer ordering. Simple heuristic: faces that were rotated are now
134
- # on top (sign +1) of faces that stayed put.
135
- rotated_set = set(rotate_ids)
136
-
137
- def _face_side(face_verts: list[int]) -> str:
138
- """Classify a face as 'rotated', 'fixed', or 'mixed'."""
139
- r_count = sum(1 for v in face_verts if v in rotated_set)
140
- if r_count == len(face_verts):
141
- return "rotated"
142
- if r_count == 0:
143
- return "fixed"
144
- return "mixed"
145
-
146
- face_sides = [_face_side(f) for f in new_paper.faces]
147
- for i in range(len(new_paper.faces)):
148
- for j in range(i + 1, len(new_paper.faces)):
149
- if face_sides[i] == "rotated" and face_sides[j] == "fixed":
150
- new_paper.face_orders.append((i, j, 1))
151
- elif face_sides[i] == "fixed" and face_sides[j] == "rotated":
152
- new_paper.face_orders.append((j, i, 1))
153
-
154
- return new_paper, None
155
-
156
-
157
- # ────────────────────────────────────────────────────────────────────
158
- # Strategy executor (matches mock_env.execute_fold_strategy signature)
159
- # ────────────────────────────────────────────────────────────────────
160
-
161
- def execute_fold_strategy(
162
- strategy_fn: Callable,
163
- paper: Paper,
164
- max_folds: int = 20,
165
- ) -> tuple[Paper, list[dict], str | None]:
166
- """Execute a ``fold_strategy`` function against the real physics engine.
167
-
168
- Signature matches ``mock_env.execute_fold_strategy`` so the trainer
169
- reward functions can swap engines transparently.
170
-
171
- Parameters
172
- ----------
173
- strategy_fn : callable
174
- ``strategy_fn(paper_state_dict) -> list[dict]``
175
- paper : Paper
176
- The initial paper state.
177
- max_folds : int
178
- Maximum number of folds to apply.
179
-
180
- Returns
181
- -------
182
- (final_paper, applied_folds, error_or_None)
183
- """
184
- state_dict = paper.to_dict()
185
- try:
186
- folds = strategy_fn(state_dict)
187
- except Exception as exc:
188
- return paper, [], f"Strategy function raised: {exc}"
189
-
190
- if not isinstance(folds, list):
191
- return paper, [], "Strategy must return a list of fold dicts"
192
-
193
- applied: list[dict] = []
194
- current = paper
195
-
196
- for i, fold in enumerate(folds):
197
- if i >= max_folds:
198
- break
199
- if not isinstance(fold, dict):
200
- return current, applied, f"Fold {i} is not a dict"
201
-
202
- current, error = apply_fold(current, fold)
203
- if error:
204
- return current, applied, f"Fold {i} failed: {error}"
205
- applied.append(fold)
206
-
207
- return current, applied, None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
engine/materials.py DELETED
@@ -1,79 +0,0 @@
1
- """
2
- Material definitions for origami simulation.
3
-
4
- Provides dataclass-based material properties and preset materials
5
- for paper, mylar, aluminum, and nitinol.
6
- """
7
-
8
- from dataclasses import dataclass
9
-
10
-
11
- @dataclass
12
- class Material:
13
- name: str
14
- thickness_mm: float # mm
15
- youngs_modulus_gpa: float # GPa
16
- max_strain: float # fraction (e.g. 0.03 = 3%)
17
- poissons_ratio: float = 0.3 # dimensionless
18
-
19
- @property
20
- def thickness_m(self) -> float:
21
- """Thickness in meters."""
22
- return self.thickness_mm / 1000.0
23
-
24
- @property
25
- def youngs_modulus_pa(self) -> float:
26
- """Young's modulus in Pascals."""
27
- return self.youngs_modulus_gpa * 1e9
28
-
29
-
30
- # ── Preset materials ────────────────────────────────────────────────
31
-
32
- MATERIAL_PRESETS: dict[str, Material] = {
33
- "paper": Material(
34
- name="paper",
35
- thickness_mm=0.1,
36
- youngs_modulus_gpa=2.0,
37
- max_strain=0.03,
38
- poissons_ratio=0.3,
39
- ),
40
- "mylar": Material(
41
- name="mylar",
42
- thickness_mm=0.05,
43
- youngs_modulus_gpa=4.0,
44
- max_strain=0.03,
45
- poissons_ratio=0.38,
46
- ),
47
- "aluminum": Material(
48
- name="aluminum",
49
- thickness_mm=0.1,
50
- youngs_modulus_gpa=69.0,
51
- max_strain=0.01,
52
- poissons_ratio=0.33,
53
- ),
54
- "nitinol": Material(
55
- name="nitinol",
56
- thickness_mm=0.1,
57
- youngs_modulus_gpa=75.0,
58
- max_strain=0.08,
59
- poissons_ratio=0.33,
60
- ),
61
- }
62
-
63
-
64
- def get_material(name: str) -> Material:
65
- """Return a copy of a preset material by name.
66
-
67
- Raises KeyError if name is not in MATERIAL_PRESETS.
68
- """
69
- if name not in MATERIAL_PRESETS:
70
- available = ", ".join(sorted(MATERIAL_PRESETS))
71
- raise KeyError(f"Unknown material '{name}'. Available: {available}")
72
- preset = MATERIAL_PRESETS[name]
73
- return Material(
74
- name=preset.name,
75
- thickness_mm=preset.thickness_mm,
76
- youngs_modulus_gpa=preset.youngs_modulus_gpa,
77
- max_strain=preset.max_strain,
78
- poissons_ratio=preset.poissons_ratio,
79
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
engine/metrics.py DELETED
@@ -1,104 +0,0 @@
1
- """
2
- Quality metrics for folded origami.
3
-
4
- Computes bounding box, deployment ratio, fold count, and aggregated
5
- metric dictionaries for the trainer reward functions.
6
- """
7
-
8
- from __future__ import annotations
9
-
10
- import numpy as np
11
-
12
- from .paper import Paper
13
-
14
-
15
- def compute_bounding_box(paper: Paper) -> np.ndarray:
16
- """Axis-aligned bounding-box dimensions (dx, dy, dz).
17
-
18
- Returns shape (3,) array. Minimum z-thickness accounts for
19
- material thickness times estimated layer count.
20
- """
21
- if len(paper.vertices) == 0:
22
- return np.zeros(3)
23
-
24
- ptp = np.ptp(paper.vertices, axis=0)
25
- ptp = np.where(np.abs(ptp) < 1e-12, 0.0, ptp)
26
-
27
- # Minimum z from material thickness * layers
28
- t = paper.material.thickness_mm / 1000.0
29
- ptp[2] = max(ptp[2], t * paper.num_layers)
30
-
31
- return ptp
32
-
33
-
34
- def compute_deployment_ratio(paper: Paper) -> float:
35
- """Ratio of folded XY footprint area to original sheet area.
36
-
37
- A fully flat unfolded sheet has ratio 1.0; a tightly folded sheet
38
- approaches 0.0.
39
- """
40
- if paper.original_area <= 0:
41
- return 1.0
42
-
43
- bb = compute_bounding_box(paper)
44
- folded_area = bb[0] * bb[1]
45
-
46
- ratio = folded_area / paper.original_area
47
- return float(np.clip(ratio, 0.0, 1.0))
48
-
49
-
50
- def compute_fold_count(paper: Paper) -> int:
51
- """Number of mountain (M) and valley (V) edges."""
52
- return sum(1 for a in paper.assignments if a in ("M", "V"))
53
-
54
-
55
- def compute_compactness(paper: Paper) -> float:
56
- """1 - deployment_ratio. Higher is more compact."""
57
- return 1.0 - compute_deployment_ratio(paper)
58
-
59
-
60
- def compute_volume(paper: Paper) -> float:
61
- """Bounding-box volume in cubic meters."""
62
- bb = compute_bounding_box(paper)
63
- return float(bb[0] * bb[1] * bb[2])
64
-
65
-
66
- def compute_metrics(paper: Paper, original_paper: Paper | None = None) -> dict:
67
- """Compute all quality metrics and return as a dict.
68
-
69
- Parameters
70
- ----------
71
- paper : Paper
72
- The current (folded) paper state.
73
- original_paper : Paper or None
74
- The original (unfolded) paper, used for strain comparison.
75
- If None, strain is computed against the current paper's rest lengths.
76
-
77
- Returns
78
- -------
79
- dict with keys:
80
- bounding_box, deployment_ratio, fold_count, compactness,
81
- volume, max_strain, mean_strain, num_vertices, num_faces,
82
- num_layers.
83
- """
84
- from .physics import compute_strain # local import to avoid circular
85
-
86
- bb = compute_bounding_box(paper)
87
- strain = compute_strain(paper)
88
-
89
- return {
90
- "bounding_box": {
91
- "x": float(bb[0]),
92
- "y": float(bb[1]),
93
- "z": float(bb[2]),
94
- },
95
- "deployment_ratio": compute_deployment_ratio(paper),
96
- "fold_count": compute_fold_count(paper),
97
- "compactness": compute_compactness(paper),
98
- "volume": compute_volume(paper),
99
- "max_strain": float(np.max(strain)) if len(strain) > 0 else 0.0,
100
- "mean_strain": float(np.mean(strain)) if len(strain) > 0 else 0.0,
101
- "num_vertices": len(paper.vertices),
102
- "num_faces": len(paper.faces),
103
- "num_layers": paper.num_layers,
104
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
engine/paper.py DELETED
@@ -1,488 +0,0 @@
1
- """
2
- Paper — the core geometric data structure for origami simulation.
3
-
4
- Stores vertices, edges, faces, fold assignments, fold angles, layer ordering,
5
- and material. Supports FOLD-format serialization and the face-splitting
6
- operation needed by the fold engine.
7
- """
8
-
9
- from __future__ import annotations
10
-
11
- import copy
12
- import json
13
- from dataclasses import dataclass, field
14
- from typing import Any
15
-
16
- import numpy as np
17
-
18
- from .materials import Material, get_material
19
-
20
-
21
- # ────────────────────────────────────────────────────────────────────
22
- # Helper: 2-D line-segment intersection
23
- # ────────────────────────────────────────────────────────────────────
24
-
25
- def _seg_seg_intersect_2d(
26
- p1: np.ndarray, p2: np.ndarray,
27
- p3: np.ndarray, p4: np.ndarray,
28
- eps: float = 1e-10,
29
- ) -> np.ndarray | None:
30
- """Return the intersection point of segments (p1-p2) and (p3-p4) in 2-D,
31
- or None if they do not intersect. Points that lie on the segment
32
- endpoints are considered intersections (within tolerance *eps*).
33
-
34
- All inputs are shape (2,).
35
- """
36
- d1 = p2 - p1
37
- d2 = p4 - p3
38
- denom = d1[0] * d2[1] - d1[1] * d2[0]
39
-
40
- if abs(denom) < eps:
41
- return None # parallel / collinear
42
-
43
- dp = p3 - p1
44
- t = (dp[0] * d2[1] - dp[1] * d2[0]) / denom
45
- u = (dp[0] * d1[1] - dp[1] * d1[0]) / denom
46
-
47
- if -eps <= t <= 1.0 + eps and -eps <= u <= 1.0 + eps:
48
- return p1 + np.clip(t, 0.0, 1.0) * d1
49
- return None
50
-
51
-
52
- # ────────────────────────────────────────────────────────────────────
53
- # Paper dataclass
54
- # ────────────────────────────────────────────────────────────────────
55
-
56
- @dataclass
57
- class Paper:
58
- """Origami sheet state.
59
-
60
- Attributes
61
- ----------
62
- vertices : np.ndarray, shape (N, 3)
63
- Vertex positions in 3-D.
64
- edges : np.ndarray, shape (E, 2), dtype int
65
- Each row is (v_start, v_end).
66
- faces : list[list[int]]
67
- Each face is an ordered list of vertex indices (CCW winding).
68
- assignments : list[str]
69
- Per-edge assignment: 'M' (mountain), 'V' (valley), 'B' (boundary),
70
- 'F' (flat / unfolded), 'U' (unassigned).
71
- fold_angles : np.ndarray, shape (E,)
72
- Current fold angle (degrees) per edge.
73
- face_orders : list[tuple[int, int, int]]
74
- Layer ordering triples (f_i, f_j, +1/-1) meaning f_i is above/below f_j.
75
- material : Material
76
- The sheet material.
77
- rest_lengths : np.ndarray, shape (E,)
78
- Original (unfolded) edge lengths — used for strain computation.
79
- original_area : float
80
- Area of the sheet before any folds.
81
- """
82
-
83
- vertices: np.ndarray
84
- edges: np.ndarray
85
- faces: list[list[int]]
86
- assignments: list[str]
87
- fold_angles: np.ndarray
88
- face_orders: list[tuple[int, int, int]] = field(default_factory=list)
89
- material: Material = field(default_factory=lambda: get_material("paper"))
90
- rest_lengths: np.ndarray = field(default_factory=lambda: np.empty(0))
91
- original_area: float = 0.0
92
-
93
- # ── constructors ────────────────────────────────────────────────
94
-
95
- @staticmethod
96
- def create_flat_sheet(
97
- width: float = 1.0,
98
- height: float = 1.0,
99
- material: Material | None = None,
100
- ) -> "Paper":
101
- """Create a flat rectangular sheet with 4 vertices, 5 edges
102
- (including one diagonal), and 2 triangular faces."""
103
- mat = material if material is not None else get_material("paper")
104
-
105
- verts = np.array([
106
- [0.0, 0.0, 0.0],
107
- [width, 0.0, 0.0],
108
- [width, height, 0.0],
109
- [0.0, height, 0.0],
110
- ], dtype=np.float64)
111
-
112
- edges = np.array([
113
- [0, 1], # bottom
114
- [1, 2], # right
115
- [2, 3], # top
116
- [3, 0], # left
117
- [0, 2], # diagonal
118
- ], dtype=np.int64)
119
-
120
- faces: list[list[int]] = [[0, 1, 2], [0, 2, 3]]
121
- assignments = ["B", "B", "B", "B", "F"]
122
- fold_angles = np.zeros(len(edges), dtype=np.float64)
123
- rest_lengths = np.array(
124
- [np.linalg.norm(verts[e[1]] - verts[e[0]]) for e in edges],
125
- dtype=np.float64,
126
- )
127
-
128
- return Paper(
129
- vertices=verts,
130
- edges=edges,
131
- faces=faces,
132
- assignments=assignments,
133
- fold_angles=fold_angles,
134
- material=mat,
135
- rest_lengths=rest_lengths,
136
- original_area=width * height,
137
- )
138
-
139
- # ── dict / prompt serialization (matches mock_env.PaperState.to_dict) ──
140
-
141
- def to_dict(self) -> dict:
142
- """Return a simplified dict suitable for LLM prompts.
143
-
144
- The format matches ``mock_env.PaperState.to_dict()`` so that the
145
- trainer reward functions work with either engine.
146
- """
147
- bb = self.bounding_box
148
- return {
149
- "width": float(bb[0]),
150
- "height": float(bb[1]),
151
- "material": {
152
- "name": self.material.name,
153
- "thickness_mm": self.material.thickness_mm,
154
- "youngs_modulus_gpa": self.material.youngs_modulus_gpa,
155
- },
156
- "vertices": self.vertices.tolist(),
157
- "edges": self.edges.tolist(),
158
- "assignments": list(self.assignments),
159
- "fold_angles": self.fold_angles.tolist(),
160
- "num_layers_at_center": self.num_layers,
161
- "bounding_box": {
162
- "x": float(bb[0]),
163
- "y": float(bb[1]),
164
- "z": float(bb[2]),
165
- },
166
- }
167
-
168
- # ── FOLD format serialization ───────────────────────────────────
169
-
170
- def to_fold_json(self) -> str:
171
- """Serialize to FOLD JSON format (v1.1 subset)."""
172
- fold = {
173
- "file_spec": 1.1,
174
- "file_creator": "optigami",
175
- "file_classes": ["singleModel"],
176
- "frame_classes": ["foldedForm"],
177
- "vertices_coords": self.vertices.tolist(),
178
- "edges_vertices": self.edges.tolist(),
179
- "edges_assignment": self.assignments,
180
- "edges_foldAngle": self.fold_angles.tolist(),
181
- "faces_vertices": self.faces,
182
- "faceOrders": [list(fo) for fo in self.face_orders],
183
- }
184
- return json.dumps(fold, indent=2)
185
-
186
- @staticmethod
187
- def from_fold_json(data: str | dict, material: Material | None = None) -> "Paper":
188
- """Deserialize from FOLD JSON format."""
189
- if isinstance(data, str):
190
- data = json.loads(data)
191
-
192
- verts = np.array(data["vertices_coords"], dtype=np.float64)
193
- edges = np.array(data["edges_vertices"], dtype=np.int64)
194
- faces = data.get("faces_vertices", [])
195
- assignments = data.get("edges_assignment", ["U"] * len(edges))
196
- fold_angles = np.array(
197
- data.get("edges_foldAngle", [0.0] * len(edges)),
198
- dtype=np.float64,
199
- )
200
- face_orders = [tuple(fo) for fo in data.get("faceOrders", [])]
201
-
202
- rest_lengths = np.array(
203
- [np.linalg.norm(verts[e[1]] - verts[e[0]]) for e in edges],
204
- dtype=np.float64,
205
- )
206
-
207
- mat = material if material is not None else get_material("paper")
208
-
209
- # Approximate original area from convex hull of initial XY footprint
210
- try:
211
- from scipy.spatial import ConvexHull
212
- hull = ConvexHull(verts[:, :2])
213
- area = hull.volume # 2-D ConvexHull.volume is area
214
- except Exception:
215
- # Fallback: bounding-box area from XY coordinates
216
- if len(verts) >= 2:
217
- ptp = np.ptp(verts[:, :2], axis=0)
218
- area = float(ptp[0] * ptp[1])
219
- else:
220
- area = 0.0
221
-
222
- return Paper(
223
- vertices=verts,
224
- edges=edges,
225
- faces=faces,
226
- assignments=assignments,
227
- fold_angles=fold_angles,
228
- face_orders=face_orders,
229
- material=mat,
230
- rest_lengths=rest_lengths,
231
- original_area=area,
232
- )
233
-
234
- # ── computed properties ─────────────────────────────────────────
235
-
236
- @property
237
- def bounding_box(self) -> np.ndarray:
238
- """Axis-aligned bounding-box dimensions (dx, dy, dz)."""
239
- if len(self.vertices) == 0:
240
- return np.zeros(3)
241
- ptp = np.ptp(self.vertices, axis=0)
242
- ptp = np.where(np.abs(ptp) < 1e-12, 0.0, ptp)
243
- # Ensure minimum z height from material thickness * layers
244
- t = self.material.thickness_mm / 1000.0
245
- ptp[2] = max(ptp[2], t * self.num_layers)
246
- return ptp
247
-
248
- @property
249
- def num_layers(self) -> int:
250
- """Estimate layer count from face-order triples.
251
-
252
- Falls back to 1 + number of M/V edges as a simple heuristic when
253
- face_orders is empty.
254
- """
255
- if self.face_orders:
256
- face_ids = set()
257
- for fo in self.face_orders:
258
- face_ids.add(fo[0])
259
- face_ids.add(fo[1])
260
- return max(len(face_ids), 1)
261
- # Heuristic: each fold adds one layer
262
- mv_count = sum(1 for a in self.assignments if a in ("M", "V"))
263
- return 1 + mv_count
264
-
265
- # ── topology helpers ────────────────────────────────────────────
266
-
267
- def _find_or_add_vertex(self, point_3d: np.ndarray, tol: float = 1e-8) -> int:
268
- """Return index of an existing vertex close to *point_3d*, or add a
269
- new vertex and return its index."""
270
- for i, v in enumerate(self.vertices):
271
- if np.linalg.norm(v - point_3d) < tol:
272
- return i
273
- idx = len(self.vertices)
274
- self.vertices = np.vstack([self.vertices, point_3d.reshape(1, 3)])
275
- return idx
276
-
277
- def _find_or_add_edge(self, v1: int, v2: int) -> int:
278
- """Return index of edge (v1,v2) or (v2,v1), or add a new edge and
279
- return its index. New edges get assignment 'F' and fold-angle 0."""
280
- for i, e in enumerate(self.edges):
281
- if (e[0] == v1 and e[1] == v2) or (e[0] == v2 and e[1] == v1):
282
- return i
283
- idx = len(self.edges)
284
- self.edges = np.vstack([self.edges, np.array([[v1, v2]], dtype=np.int64)])
285
- self.assignments.append("F")
286
- self.fold_angles = np.append(self.fold_angles, 0.0)
287
- # Rest length for the new edge
288
- rl = np.linalg.norm(self.vertices[v1] - self.vertices[v2])
289
- self.rest_lengths = np.append(self.rest_lengths, rl)
290
- return idx
291
-
292
- # ── face splitting ──────────────────────────────────────────────
293
-
294
- def split_faces_along_line(
295
- self,
296
- start_2d: np.ndarray | list,
297
- end_2d: np.ndarray | list,
298
- ) -> list[int]:
299
- """Split every face that the 2-D line (start_2d -> end_2d) crosses.
300
-
301
- The line is infinite for intersection purposes (we test each face
302
- edge-segment against the full fold-line extent clipped to the paper).
303
-
304
- Returns a list of edge indices that lie *on* the fold line (i.e. the
305
- newly created edges along the fold path).
306
-
307
- This mutates ``self`` in-place (vertices, edges, faces, assignments,
308
- fold_angles, rest_lengths are updated).
309
- """
310
- start_2d = np.asarray(start_2d, dtype=np.float64)
311
- end_2d = np.asarray(end_2d, dtype=np.float64)
312
-
313
- fold_edge_indices: list[int] = []
314
- new_faces: list[list[int]] = []
315
-
316
- faces_to_process = list(range(len(self.faces)))
317
-
318
- for fi in faces_to_process:
319
- face = self.faces[fi]
320
- n = len(face)
321
-
322
- # Gather intersection points along the face boundary
323
- hits: list[tuple[int, np.ndarray]] = [] # (local_edge_index, point_2d)
324
-
325
- for k in range(n):
326
- v_a = face[k]
327
- v_b = face[(k + 1) % n]
328
- pa = self.vertices[v_a][:2]
329
- pb = self.vertices[v_b][:2]
330
-
331
- pt = _seg_seg_intersect_2d(start_2d, end_2d, pa, pb)
332
- if pt is not None:
333
- hits.append((k, pt))
334
-
335
- # Deduplicate hits that are at the same location (e.g. hitting a vertex)
336
- if len(hits) >= 2:
337
- unique_hits: list[tuple[int, np.ndarray]] = [hits[0]]
338
- for h in hits[1:]:
339
- is_dup = False
340
- for uh in unique_hits:
341
- if np.linalg.norm(h[1] - uh[1]) < 1e-8:
342
- is_dup = True
343
- break
344
- if not is_dup:
345
- unique_hits.append(h)
346
- hits = unique_hits
347
-
348
- if len(hits) < 2:
349
- # Line does not fully cross this face — keep face as-is
350
- new_faces.append(face)
351
- continue
352
-
353
- # We only handle the first two intersection points (one chord across face)
354
- hit_a_edge_idx, hit_a_pt = hits[0]
355
- hit_b_edge_idx, hit_b_pt = hits[1]
356
-
357
- # Create / find 3-D vertices at intersection points (z=0 for flat, interpolated otherwise)
358
- def _interp_z(pt2d: np.ndarray, edge_local: int) -> np.ndarray:
359
- """Interpolate z from the edge endpoints."""
360
- v_a = face[edge_local]
361
- v_b = face[(edge_local + 1) % n]
362
- pa = self.vertices[v_a]
363
- pb = self.vertices[v_b]
364
- seg = pb[:2] - pa[:2]
365
- seg_len = np.linalg.norm(seg)
366
- if seg_len < 1e-12:
367
- return np.array([pt2d[0], pt2d[1], pa[2]])
368
- t = np.linalg.norm(pt2d - pa[:2]) / seg_len
369
- t = np.clip(t, 0.0, 1.0)
370
- z = pa[2] + t * (pb[2] - pa[2])
371
- return np.array([pt2d[0], pt2d[1], z])
372
-
373
- pt_a_3d = _interp_z(hit_a_pt, hit_a_edge_idx)
374
- pt_b_3d = _interp_z(hit_b_pt, hit_b_edge_idx)
375
-
376
- idx_a = self._find_or_add_vertex(pt_a_3d)
377
- idx_b = self._find_or_add_vertex(pt_b_3d)
378
-
379
- if idx_a == idx_b:
380
- new_faces.append(face)
381
- continue
382
-
383
- # Add the fold-line edge between the two intersection points
384
- fold_eidx = self._find_or_add_edge(idx_a, idx_b)
385
- fold_edge_indices.append(fold_eidx)
386
-
387
- # ── Split the face into two sub-faces ──
388
- # Walk around the face vertices, inserting idx_a and idx_b at the
389
- # appropriate positions, then split into two loops.
390
- ordered_verts = list(face)
391
-
392
- # Insert intersection vertices into the vertex ring if not already present
393
- def _insert_after(ring: list[int], after_local: int, vid: int) -> list[int]:
394
- """Insert *vid* after position *after_local* if it is not already
395
- adjacent in the ring at that position."""
396
- pos = after_local + 1
397
- if ring[after_local % len(ring)] == vid:
398
- return ring
399
- if ring[pos % len(ring)] == vid:
400
- return ring
401
- return ring[:pos] + [vid] + ring[pos:]
402
-
403
- # Determine insertion order — always insert the one with the
404
- # larger local-edge index first so that the earlier index stays valid.
405
- if hit_a_edge_idx <= hit_b_edge_idx:
406
- ordered_verts = _insert_after(ordered_verts, hit_b_edge_idx, idx_b)
407
- # Recompute hit_a_edge_idx offset if idx_b was inserted before it
408
- # (it shouldn't be, since hit_b >= hit_a, but guard anyway)
409
- a_pos = hit_a_edge_idx
410
- ordered_verts = _insert_after(ordered_verts, a_pos, idx_a)
411
- else:
412
- ordered_verts = _insert_after(ordered_verts, hit_a_edge_idx, idx_a)
413
- ordered_verts = _insert_after(ordered_verts, hit_b_edge_idx, idx_b)
414
-
415
- # Now split the ring at idx_a and idx_b
416
- try:
417
- pos_a = ordered_verts.index(idx_a)
418
- pos_b = ordered_verts.index(idx_b)
419
- except ValueError:
420
- new_faces.append(face)
421
- continue
422
-
423
- if pos_a > pos_b:
424
- pos_a, pos_b = pos_b, pos_a
425
-
426
- loop1 = ordered_verts[pos_a: pos_b + 1]
427
- loop2 = ordered_verts[pos_b:] + ordered_verts[: pos_a + 1]
428
-
429
- # Only keep faces with >= 3 unique vertices
430
- for loop in (loop1, loop2):
431
- unique = list(dict.fromkeys(loop)) # preserve order, dedupe
432
- if len(unique) >= 3:
433
- new_faces.append(unique)
434
- # Ensure all edges of this new face exist
435
- for k in range(len(unique)):
436
- self._find_or_add_edge(unique[k], unique[(k + 1) % len(unique)])
437
-
438
- self.faces = new_faces
439
- return fold_edge_indices
440
-
441
- # ── vertex side test ────────────────────────────────────────────
442
-
443
- def get_vertices_on_side(
444
- self,
445
- line_start: np.ndarray | list,
446
- line_end: np.ndarray | list,
447
- side: str = "positive",
448
- ) -> list[int]:
449
- """Return indices of vertices on one side of a 2-D line.
450
-
451
- *side* can be ``"positive"`` or ``"negative"``. The positive side is
452
- defined by the left-hand normal of (line_end - line_start).
453
- """
454
- ls = np.asarray(line_start, dtype=np.float64)[:2]
455
- le = np.asarray(line_end, dtype=np.float64)[:2]
456
- d = le - ls
457
- normal = np.array([-d[1], d[0]])
458
-
459
- indices: list[int] = []
460
- for i, v in enumerate(self.vertices):
461
- dot = np.dot(v[:2] - ls, normal)
462
- if side == "positive" and dot > 1e-9:
463
- indices.append(i)
464
- elif side == "negative" and dot < -1e-9:
465
- indices.append(i)
466
- return indices
467
-
468
- # ── deep copy ───────────────────────────────────────────────────
469
-
470
- def copy(self) -> "Paper":
471
- """Return an independent deep copy."""
472
- return Paper(
473
- vertices=self.vertices.copy(),
474
- edges=self.edges.copy(),
475
- faces=copy.deepcopy(self.faces),
476
- assignments=list(self.assignments),
477
- fold_angles=self.fold_angles.copy(),
478
- face_orders=list(self.face_orders),
479
- material=Material(
480
- name=self.material.name,
481
- thickness_mm=self.material.thickness_mm,
482
- youngs_modulus_gpa=self.material.youngs_modulus_gpa,
483
- max_strain=self.material.max_strain,
484
- poissons_ratio=self.material.poissons_ratio,
485
- ),
486
- rest_lengths=self.rest_lengths.copy(),
487
- original_area=self.original_area,
488
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
engine/physics.py DELETED
@@ -1,257 +0,0 @@
1
- """
2
- Bar-and-hinge physics model.
3
-
4
- Three energy components:
5
- E_total = E_bar + E_facet + E_fold
6
-
7
- Stiffness parameters are derived from the material properties.
8
- """
9
-
10
- from __future__ import annotations
11
-
12
- from dataclasses import dataclass
13
-
14
- import numpy as np
15
-
16
- from .paper import Paper
17
-
18
-
19
- # ────────────────────────────────────────────────────────────────────
20
- # Stiffness
21
- # ────────────────────────────────────────────────────────────────────
22
-
23
- @dataclass
24
- class StiffnessParams:
25
- """Stiffness values derived from material properties."""
26
- k_axial: np.ndarray # per-edge axial stiffness (E,)
27
- k_facet: float # facet (panel bending) stiffness
28
- k_fold: float # fold (crease torsion) stiffness
29
-
30
-
31
- def compute_stiffness(paper: Paper) -> StiffnessParams:
32
- """Derive stiffness parameters from the paper's material and geometry.
33
-
34
- k_axial = E * t * w / L0 (per edge, w ≈ average of adjacent edge lengths)
35
- k_facet = E * t^3 / (12 * (1 - nu^2))
36
- k_fold = 0.1 * k_facet (crease torsional stiffness, empirical fraction)
37
- """
38
- mat = paper.material
39
- E = mat.youngs_modulus_pa # Pa
40
- t = mat.thickness_m # m
41
- nu = mat.poissons_ratio
42
-
43
- rest = paper.rest_lengths
44
- # Guard against zero rest lengths
45
- safe_rest = np.where(rest > 1e-15, rest, 1e-15)
46
-
47
- # Approximate edge width as the average rest length (simple heuristic)
48
- w = np.mean(safe_rest) if len(safe_rest) > 0 else 1e-3
49
-
50
- k_axial = E * t * w / safe_rest # (E,)
51
-
52
- k_facet = E * t ** 3 / (12.0 * (1.0 - nu ** 2))
53
-
54
- # Crease torsional stiffness — a fraction of facet stiffness
55
- k_fold = 0.1 * k_facet
56
-
57
- return StiffnessParams(k_axial=k_axial, k_facet=k_facet, k_fold=k_fold)
58
-
59
-
60
- # ────────────────────────────────────────────────────────────────────
61
- # Energy components
62
- # ────────────────────────────────────────────────────────────────────
63
-
64
- def compute_bar_energy(paper: Paper) -> float:
65
- """E_bar = sum (1/2) * k_axial * (L - L0)^2
66
-
67
- Measures stretching / compression of edges relative to rest lengths.
68
- """
69
- if len(paper.edges) == 0:
70
- return 0.0
71
-
72
- verts = paper.vertices
73
- edges = paper.edges
74
- current_lengths = np.array([
75
- np.linalg.norm(verts[e[1]] - verts[e[0]]) for e in edges
76
- ])
77
-
78
- stiff = compute_stiffness(paper)
79
- delta = current_lengths - paper.rest_lengths
80
- energy = 0.5 * np.sum(stiff.k_axial * delta ** 2)
81
- return float(energy)
82
-
83
-
84
- def compute_facet_energy(paper: Paper) -> float:
85
- """E_facet = sum (1/2) * k_facet * l * (theta - pi)^2
86
-
87
- Measures bending of facet panels away from flat (pi).
88
- *l* is the edge length (hinge length) and *theta* is the dihedral angle
89
- across the edge between two adjacent faces. For edges that are not
90
- shared by two faces we skip them.
91
- """
92
- if len(paper.edges) == 0 or len(paper.faces) < 2:
93
- return 0.0
94
-
95
- stiff = compute_stiffness(paper)
96
- verts = paper.vertices
97
- edges = paper.edges
98
-
99
- # Build edge → face adjacency
100
- edge_faces: dict[int, list[int]] = {}
101
- for fi, face in enumerate(paper.faces):
102
- n = len(face)
103
- for k in range(n):
104
- va, vb = face[k], face[(k + 1) % n]
105
- for ei, e in enumerate(edges):
106
- if (e[0] == va and e[1] == vb) or (e[0] == vb and e[1] == va):
107
- edge_faces.setdefault(ei, []).append(fi)
108
- break
109
-
110
- energy = 0.0
111
- for ei, adj_faces in edge_faces.items():
112
- if len(adj_faces) < 2:
113
- continue
114
- # Only consider non-fold edges (flat or boundary interior)
115
- if paper.assignments[ei] in ("M", "V"):
116
- continue
117
-
118
- f1, f2 = adj_faces[0], adj_faces[1]
119
- theta = _dihedral_angle(verts, paper.faces[f1], paper.faces[f2], edges[ei])
120
- l = np.linalg.norm(verts[edges[ei][1]] - verts[edges[ei][0]])
121
- energy += 0.5 * stiff.k_facet * l * (theta - np.pi) ** 2
122
-
123
- return float(energy)
124
-
125
-
126
- def compute_fold_energy(paper: Paper) -> float:
127
- """E_fold = sum (1/2) * k_fold * l * (rho - rho_target)^2
128
-
129
- Measures deviation of fold creases from their target angles.
130
- *rho* is the current dihedral angle across the fold edge and
131
- *rho_target* comes from ``fold_angles``.
132
- """
133
- if len(paper.edges) == 0:
134
- return 0.0
135
-
136
- stiff = compute_stiffness(paper)
137
- verts = paper.vertices
138
- edges = paper.edges
139
-
140
- # Build edge → face adjacency
141
- edge_faces: dict[int, list[int]] = {}
142
- for fi, face in enumerate(paper.faces):
143
- n = len(face)
144
- for k in range(n):
145
- va, vb = face[k], face[(k + 1) % n]
146
- for ei, e in enumerate(edges):
147
- if (e[0] == va and e[1] == vb) or (e[0] == vb and e[1] == va):
148
- edge_faces.setdefault(ei, []).append(fi)
149
- break
150
-
151
- energy = 0.0
152
- for ei in range(len(edges)):
153
- if paper.assignments[ei] not in ("M", "V"):
154
- continue
155
- if ei not in edge_faces or len(edge_faces[ei]) < 2:
156
- continue
157
-
158
- f1, f2 = edge_faces[ei][0], edge_faces[ei][1]
159
- rho = _dihedral_angle(verts, paper.faces[f1], paper.faces[f2], edges[ei])
160
- rho_target = np.radians(paper.fold_angles[ei]) # fold_angles stored in degrees
161
- l = np.linalg.norm(verts[edges[ei][1]] - verts[edges[ei][0]])
162
- energy += 0.5 * stiff.k_fold * l * (rho - rho_target) ** 2
163
-
164
- return float(energy)
165
-
166
-
167
- def compute_total_energy(paper: Paper) -> float:
168
- """E_total = E_bar + E_facet + E_fold."""
169
- return compute_bar_energy(paper) + compute_facet_energy(paper) + compute_fold_energy(paper)
170
-
171
-
172
- # ────────────────────────────────────────────────────────────────────
173
- # Strain
174
- # ────────────────────────────────────────────────────────────────────
175
-
176
- def compute_strain(paper: Paper) -> np.ndarray:
177
- """Per-vertex Cauchy strain: average fractional edge-length deviation.
178
-
179
- Returns shape (N,) array of non-negative strain values.
180
- """
181
- n_verts = len(paper.vertices)
182
- if n_verts == 0:
183
- return np.empty(0)
184
-
185
- verts = paper.vertices
186
- edges = paper.edges
187
- rest = paper.rest_lengths
188
-
189
- # Build vertex → edge adjacency
190
- vert_edges: dict[int, list[int]] = {}
191
- for ei, e in enumerate(edges):
192
- vert_edges.setdefault(int(e[0]), []).append(ei)
193
- vert_edges.setdefault(int(e[1]), []).append(ei)
194
-
195
- strain = np.zeros(n_verts, dtype=np.float64)
196
- for vi in range(n_verts):
197
- adj = vert_edges.get(vi, [])
198
- if not adj:
199
- continue
200
- devs = []
201
- for ei in adj:
202
- v1, v2 = edges[ei]
203
- L = np.linalg.norm(verts[v1] - verts[v2])
204
- L0 = rest[ei]
205
- if L0 > 1e-15:
206
- devs.append(abs(L - L0) / L0)
207
- if devs:
208
- strain[vi] = float(np.mean(devs))
209
-
210
- return strain
211
-
212
-
213
- # ────────────────────────────────────────────────────────────────────
214
- # Dihedral angle helper
215
- # ────────────────────────────────────────────────────────────────────
216
-
217
- def _dihedral_angle(
218
- verts: np.ndarray,
219
- face1: list[int],
220
- face2: list[int],
221
- edge: np.ndarray,
222
- ) -> float:
223
- """Compute the dihedral angle (in radians) between two faces sharing *edge*.
224
-
225
- Returns angle in [0, 2*pi). Returns pi if normals cannot be computed.
226
- """
227
- n1 = _face_normal(verts, face1)
228
- n2 = _face_normal(verts, face2)
229
-
230
- if n1 is None or n2 is None:
231
- return np.pi
232
-
233
- cos_a = np.clip(np.dot(n1, n2), -1.0, 1.0)
234
- angle = np.arccos(cos_a)
235
-
236
- # Determine sign from edge direction
237
- edge_dir = verts[edge[1]] - verts[edge[0]]
238
- edge_dir = edge_dir / (np.linalg.norm(edge_dir) + 1e-30)
239
- cross = np.cross(n1, n2)
240
- if np.dot(cross, edge_dir) < 0:
241
- angle = 2.0 * np.pi - angle
242
-
243
- return float(angle)
244
-
245
-
246
- def _face_normal(verts: np.ndarray, face: list[int]) -> np.ndarray | None:
247
- """Compute outward unit normal of a face, or None if degenerate."""
248
- if len(face) < 3:
249
- return None
250
- v0 = verts[face[0]]
251
- v1 = verts[face[1]]
252
- v2 = verts[face[2]]
253
- normal = np.cross(v1 - v0, v2 - v0)
254
- norm = np.linalg.norm(normal)
255
- if norm < 1e-15:
256
- return None
257
- return normal / norm
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
engine/validation.py DELETED
@@ -1,256 +0,0 @@
1
- """
2
- Geometric validation for origami crease patterns.
3
-
4
- Implements Kawasaki's theorem, Maekawa's theorem, and triangle-triangle
5
- self-intersection detection.
6
- """
7
-
8
- from __future__ import annotations
9
-
10
- from dataclasses import dataclass
11
-
12
- import numpy as np
13
-
14
- from .paper import Paper
15
-
16
-
17
- # ────────────────────────────────────────────────────────────────────
18
- # Result container
19
- # ────────────────────────────────────────────────────────────────────
20
-
21
- @dataclass
22
- class ValidationResult:
23
- kawasaki_valid: bool
24
- kawasaki_violation: float
25
- maekawa_valid: bool
26
- maekawa_violation: float
27
- intersection_free: bool
28
- self_intersection_count: int
29
- is_valid: bool # all checks pass
30
-
31
-
32
- # ────────────────────────────────────────────────────────────────────
33
- # Kawasaki's theorem
34
- # ────────────────────────────────────────────────────────────────────
35
-
36
- def check_kawasaki(paper: Paper) -> tuple[bool, float]:
37
- """At each interior vertex, the alternating sum of sector angles equals pi.
38
-
39
- Specifically, for a vertex with 2n incident creases, the sum of
40
- odd-indexed sector angles equals the sum of even-indexed sector
41
- angles equals pi.
42
-
43
- Returns (is_valid, total_violation). A violation of < 1e-6 is
44
- considered valid.
45
- """
46
- verts = paper.vertices
47
- edges = paper.edges
48
- n_verts = len(verts)
49
-
50
- # Build adjacency: vertex -> list of neighbor vertices (via edges)
51
- adj: dict[int, list[int]] = {}
52
- for e in edges:
53
- adj.setdefault(int(e[0]), []).append(int(e[1]))
54
- adj.setdefault(int(e[1]), []).append(int(e[0]))
55
-
56
- # Identify boundary vertices (incident to a 'B' edge)
57
- boundary_verts: set[int] = set()
58
- for ei, e in enumerate(edges):
59
- if paper.assignments[ei] == "B":
60
- boundary_verts.add(int(e[0]))
61
- boundary_verts.add(int(e[1]))
62
-
63
- total_violation = 0.0
64
-
65
- for vi in range(n_verts):
66
- if vi in boundary_verts:
67
- continue
68
- neighbors = adj.get(vi, [])
69
- if len(neighbors) < 2:
70
- continue
71
-
72
- # Sort neighbors by angle around vi (in the XY plane for flat-foldability)
73
- center = verts[vi][:2]
74
- angles = []
75
- for ni in neighbors:
76
- d = verts[ni][:2] - center
77
- angles.append((np.arctan2(d[1], d[0]), ni))
78
- angles.sort(key=lambda x: x[0])
79
-
80
- # Sector angles
81
- sector_angles = []
82
- for k in range(len(angles)):
83
- a1 = angles[k][0]
84
- a2 = angles[(k + 1) % len(angles)][0]
85
- diff = a2 - a1
86
- if diff <= 0:
87
- diff += 2.0 * np.pi
88
- sector_angles.append(diff)
89
-
90
- if len(sector_angles) < 2:
91
- continue
92
-
93
- # Kawasaki: alternating sums should both equal pi
94
- even_sum = sum(sector_angles[i] for i in range(0, len(sector_angles), 2))
95
- odd_sum = sum(sector_angles[i] for i in range(1, len(sector_angles), 2))
96
-
97
- violation = abs(even_sum - odd_sum)
98
- total_violation += violation
99
-
100
- is_valid = total_violation < 1e-4
101
- return is_valid, float(total_violation)
102
-
103
-
104
- # ────────────────────────────────────────────────────────────────────
105
- # Maekawa's theorem
106
- # ────────────────────────────────────────────────────────────────────
107
-
108
- def check_maekawa(paper: Paper) -> tuple[bool, float]:
109
- """At each interior vertex, |M - V| = 2.
110
-
111
- Returns (is_valid, total_violation) where violation is
112
- sum of |abs(M-V) - 2| over all interior vertices.
113
- """
114
- edges = paper.edges
115
- verts = paper.vertices
116
- n_verts = len(verts)
117
-
118
- # Boundary vertices
119
- boundary_verts: set[int] = set()
120
- for ei, e in enumerate(edges):
121
- if paper.assignments[ei] == "B":
122
- boundary_verts.add(int(e[0]))
123
- boundary_verts.add(int(e[1]))
124
-
125
- # Count M and V edges per vertex
126
- m_count = [0] * n_verts
127
- v_count = [0] * n_verts
128
- total_mv_per_vertex = [0] * n_verts
129
-
130
- for ei, e in enumerate(edges):
131
- a = paper.assignments[ei]
132
- if a == "M":
133
- m_count[int(e[0])] += 1
134
- m_count[int(e[1])] += 1
135
- elif a == "V":
136
- v_count[int(e[0])] += 1
137
- v_count[int(e[1])] += 1
138
- if a in ("M", "V"):
139
- total_mv_per_vertex[int(e[0])] += 1
140
- total_mv_per_vertex[int(e[1])] += 1
141
-
142
- total_violation = 0.0
143
- for vi in range(n_verts):
144
- if vi in boundary_verts:
145
- continue
146
- # Only check vertices that actually have creases
147
- if total_mv_per_vertex[vi] == 0:
148
- continue
149
- diff = abs(m_count[vi] - v_count[vi])
150
- violation = abs(diff - 2)
151
- total_violation += violation
152
-
153
- is_valid = total_violation < 0.5 # integer theorem, so < 0.5 means exact
154
- return is_valid, float(total_violation)
155
-
156
-
157
- # ────────────────────────────────────────────────────────────────────
158
- # Self-intersection detection (triangle-triangle)
159
- # ────────────────────────────────────────────────────────────────────
160
-
161
- def check_self_intersection(paper: Paper) -> tuple[bool, int]:
162
- """Check for triangle-triangle intersections among the paper's faces.
163
-
164
- Uses the separating-axis theorem (SAT) for triangle-triangle overlap
165
- in 3-D. Faces that share an edge or vertex are skipped.
166
-
167
- Returns (is_valid, count_of_intersections).
168
- """
169
- verts = paper.vertices
170
- faces = paper.faces
171
- count = 0
172
-
173
- for i in range(len(faces)):
174
- for j in range(i + 1, len(faces)):
175
- # Skip faces that share vertices (adjacent faces)
176
- if set(faces[i]) & set(faces[j]):
177
- continue
178
- if _triangles_intersect(verts, faces[i], faces[j]):
179
- count += 1
180
-
181
- return count == 0, count
182
-
183
-
184
- def _triangles_intersect(
185
- verts: np.ndarray,
186
- face1: list[int],
187
- face2: list[int],
188
- ) -> bool:
189
- """Test whether two triangular faces intersect in 3-D using
190
- the separating-axis theorem (Moller's method simplified).
191
-
192
- For non-triangular faces, only tests the first three vertices.
193
- Returns True if the triangles intersect.
194
- """
195
- if len(face1) < 3 or len(face2) < 3:
196
- return False
197
-
198
- t1 = verts[face1[:3]]
199
- t2 = verts[face2[:3]]
200
-
201
- # 13 potential separating axes:
202
- # - normals of each triangle (2)
203
- # - cross products of edge pairs (3x3 = 9)
204
- # - edges themselves don't need separate tests in 3D SAT
205
-
206
- e1_edges = [t1[1] - t1[0], t1[2] - t1[1], t1[0] - t1[2]]
207
- e2_edges = [t2[1] - t2[0], t2[2] - t2[1], t2[0] - t2[2]]
208
-
209
- n1 = np.cross(e1_edges[0], e1_edges[1])
210
- n2 = np.cross(e2_edges[0], e2_edges[1])
211
-
212
- axes = [n1, n2]
213
- for e1 in e1_edges:
214
- for e2 in e2_edges:
215
- ax = np.cross(e1, e2)
216
- if np.linalg.norm(ax) > 1e-12:
217
- axes.append(ax)
218
-
219
- for axis in axes:
220
- norm = np.linalg.norm(axis)
221
- if norm < 1e-12:
222
- continue
223
- axis = axis / norm
224
-
225
- proj1 = np.dot(t1, axis)
226
- proj2 = np.dot(t2, axis)
227
-
228
- min1, max1 = proj1.min(), proj1.max()
229
- min2, max2 = proj2.min(), proj2.max()
230
-
231
- # Check for separation (with small tolerance for shared-edge adjacency)
232
- if max1 < min2 - 1e-9 or max2 < min1 - 1e-9:
233
- return False # separating axis found
234
-
235
- return True # no separating axis → intersection
236
-
237
-
238
- # ────────────────────────────────────────────────────────────────────
239
- # Combined validation
240
- # ────────────────────────────────────────────────────────────────────
241
-
242
- def validate_paper(paper: Paper) -> ValidationResult:
243
- """Run all validation checks and return a combined result."""
244
- k_valid, k_violation = check_kawasaki(paper)
245
- m_valid, m_violation = check_maekawa(paper)
246
- si_valid, si_count = check_self_intersection(paper)
247
-
248
- return ValidationResult(
249
- kawasaki_valid=k_valid,
250
- kawasaki_violation=k_violation,
251
- maekawa_valid=m_valid,
252
- maekawa_violation=m_violation,
253
- intersection_free=si_valid,
254
- self_intersection_count=si_count,
255
- is_valid=k_valid and m_valid and si_valid,
256
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{engine → env}/__init__.py RENAMED
File without changes
env/environment.py ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import json
3
+ from pathlib import Path
4
+ from typing import Optional
5
+
6
+ from .paper_state import PaperState
7
+ from .rewards import compute_reward, compute_terminal_reward
8
+ from .prompts import (
9
+ code_as_policy_prompt,
10
+ get_semantic_description,
11
+ step_level_prompt,
12
+ parse_fold_list,
13
+ parse_single_fold,
14
+ )
15
+ from .verifier import check_all_vertices
16
+
17
+
18
+ TARGETS_DIR = Path(__file__).parent / 'targets'
19
+
20
+
21
+ class OrigamiEnvironment:
22
+ """
23
+ OpenEnv-compatible origami crease pattern environment.
24
+
25
+ Supports two modes:
26
+ - code_as_policy: model outputs complete fold sequence, gets terminal reward
27
+ - step: model outputs one fold at a time, gets per-step reward
28
+ """
29
+
30
+ def __init__(
31
+ self,
32
+ mode: str = 'code_as_policy', # 'code_as_policy' or 'step'
33
+ max_steps: int = 8,
34
+ targets_dir: Optional[str] = None,
35
+ use_semantic: bool = True,
36
+ ):
37
+ assert mode in ('code_as_policy', 'step'), f"Unknown mode: {mode}"
38
+ self.mode = mode
39
+ self.max_steps = max_steps
40
+ self.targets_dir = Path(targets_dir) if targets_dir else TARGETS_DIR
41
+ self.use_semantic = use_semantic
42
+
43
+ self.paper: Optional[PaperState] = None
44
+ self.target: Optional[dict] = None
45
+ self.target_name: Optional[str] = None
46
+ self.step_count: int = 0
47
+ self.last_reward: Optional[dict] = None
48
+
49
+ self._targets = self._load_all_targets()
50
+
51
+ def _load_all_targets(self) -> dict[str, dict]:
52
+ targets = {}
53
+ for fold_file in self.targets_dir.glob('*.fold'):
54
+ with open(fold_file) as f:
55
+ targets[fold_file.stem] = json.load(f)
56
+ return targets
57
+
58
+ def available_targets(self) -> list[str]:
59
+ return sorted(self._targets.keys())
60
+
61
+ def reset(self, target_name: Optional[str] = None) -> dict:
62
+ """
63
+ Reset environment to start of a new episode.
64
+
65
+ Args:
66
+ target_name: name of target (stem of .fold file). If None, picks level-1 randomly.
67
+
68
+ Returns:
69
+ observation dict with 'prompt' key containing the LLM prompt string.
70
+ """
71
+ import random
72
+
73
+ if target_name:
74
+ assert target_name in self._targets, f"Unknown target: {target_name}"
75
+ self.target_name = target_name
76
+ else:
77
+ # Default to level-1 targets
78
+ level1 = [k for k, v in self._targets.items() if v.get('level', 1) == 1]
79
+ self.target_name = random.choice(level1 if level1 else list(self._targets.keys()))
80
+
81
+ self.target = self._targets[self.target_name]
82
+ self.paper = PaperState()
83
+ self.step_count = 0
84
+ self.last_reward = None
85
+
86
+ return self._get_observation()
87
+
88
+ def step(self, action) -> tuple[dict, dict, bool, dict]:
89
+ """
90
+ Execute an action.
91
+
92
+ In code_as_policy mode: action is a string (model completion with <folds> tags)
93
+ OR a list of fold dicts already parsed.
94
+ In step mode: action is a string (single fold JSON) or dict.
95
+
96
+ Returns:
97
+ (observation, reward, done, info)
98
+ """
99
+ if self.mode == 'code_as_policy':
100
+ return self._step_sequence(action)
101
+ else:
102
+ return self._step_single(action)
103
+
104
+ def _step_sequence(self, action) -> tuple[dict, dict, bool, dict]:
105
+ """Execute a complete fold sequence (code-as-policy mode)."""
106
+ # Parse action if it's a string
107
+ if isinstance(action, str):
108
+ try:
109
+ folds = parse_fold_list(action)
110
+ except ValueError as e:
111
+ bad_reward = {'format': 0.0, 'total': -0.1, 'error': str(e)}
112
+ return self._get_observation(), bad_reward, True, self._info()
113
+ else:
114
+ folds = action # already a list of dicts
115
+
116
+ # Execute each fold sequentially
117
+ last_result = {'valid': True, 'anchored': True, 'new_vertices': [], 'errors': []}
118
+ for fold in folds:
119
+ try:
120
+ p1 = fold['from']
121
+ p2 = fold['to']
122
+ assignment = fold['assignment']
123
+ except (KeyError, TypeError) as e:
124
+ last_result = {'valid': False, 'anchored': False, 'new_vertices': [], 'errors': [str(e)]}
125
+ break
126
+
127
+ last_result = self.paper.add_crease(p1, p2, assignment)
128
+ self.step_count += 1
129
+ if not last_result['valid']:
130
+ break # stop at first invalid fold, partial credit
131
+
132
+ reward = compute_terminal_reward(self.paper, self.target, self.max_steps)
133
+ self.last_reward = reward
134
+ return self._get_observation(), reward, True, self._info()
135
+
136
+ def _step_single(self, action) -> tuple[dict, dict, bool, dict]:
137
+ """Execute a single fold (step mode)."""
138
+ if isinstance(action, str):
139
+ try:
140
+ fold = parse_single_fold(action)
141
+ except ValueError as e:
142
+ bad_reward = {'format': 0.0, 'total': -0.1, 'error': str(e)}
143
+ self.last_reward = bad_reward
144
+ done = self.step_count >= self.max_steps
145
+ return self._get_observation(), bad_reward, done, self._info()
146
+ else:
147
+ fold = action
148
+
149
+ try:
150
+ p1 = fold['from']
151
+ p2 = fold['to']
152
+ assignment = fold['assignment']
153
+ except (KeyError, TypeError) as e:
154
+ bad_reward = {'format': 0.0, 'total': -0.1, 'error': str(e)}
155
+ self.last_reward = bad_reward
156
+ done = self.step_count >= self.max_steps
157
+ return self._get_observation(), bad_reward, done, self._info()
158
+
159
+ prev_state = copy.deepcopy(self.paper)
160
+ result = self.paper.add_crease(p1, p2, assignment)
161
+ self.step_count += 1
162
+
163
+ reward = compute_reward(
164
+ prev_state=prev_state,
165
+ action_result=result,
166
+ new_state=self.paper,
167
+ target=self.target,
168
+ step=self.step_count,
169
+ max_steps=self.max_steps,
170
+ )
171
+ self.last_reward = reward
172
+
173
+ done = (
174
+ self.step_count >= self.max_steps or
175
+ reward.get('completion', 0) > 0
176
+ )
177
+ return self._get_observation(), reward, done, self._info()
178
+
179
+ def _get_observation(self) -> dict:
180
+ """Returns observation dict with the LLM prompt and raw state."""
181
+ desc = None
182
+ if self.use_semantic and self.target_name and self.target:
183
+ desc = get_semantic_description(self.target_name, self.target)
184
+
185
+ if self.mode == 'code_as_policy':
186
+ prompt = code_as_policy_prompt(
187
+ self.target, max_folds=self.max_steps, semantic_description=desc,
188
+ )
189
+ else:
190
+ prompt = step_level_prompt(
191
+ target=self.target,
192
+ paper_state=self.paper,
193
+ step=self.step_count,
194
+ max_steps=self.max_steps,
195
+ last_reward=self.last_reward,
196
+ semantic_description=desc,
197
+ )
198
+
199
+ return {
200
+ 'prompt': prompt,
201
+ 'target_name': self.target_name,
202
+ 'step': self.step_count,
203
+ 'paper_fold_json': self.paper.graph.edges if self.paper else {},
204
+ }
205
+
206
+ def _info(self) -> dict:
207
+ """Returns diagnostic info dict for logging."""
208
+ if self.paper is None:
209
+ return {}
210
+
211
+ interior = self.paper.graph.interior_vertices()
212
+ vertex_scores = check_all_vertices(self.paper.graph)
213
+
214
+ return {
215
+ 'local_foldability': (
216
+ vertex_scores['kawasaki'] == 1.0 and
217
+ vertex_scores['maekawa'] == 1.0
218
+ ),
219
+ 'blb_satisfied': vertex_scores['blb'] == 1.0,
220
+ 'global_foldability': 'not_checked', # NP-complete (Bern-Hayes 1996)
221
+ 'n_interior_vertices': len(interior),
222
+ 'n_creases': len(self.paper.graph.crease_edges()),
223
+ 'target_name': self.target_name,
224
+ }
225
+
226
+ def state(self) -> dict:
227
+ """Returns current environment state for logging/inspection."""
228
+ return {
229
+ 'paper': {
230
+ 'vertices': dict(self.paper.graph.vertices),
231
+ 'edges': {
232
+ k: v for k, v in self.paper.graph.edges.items()
233
+ if v[2] in ('M', 'V')
234
+ },
235
+ 'fold_history': self.paper.fold_history,
236
+ },
237
+ 'target': self.target_name,
238
+ 'step': self.step_count,
239
+ 'mode': self.mode,
240
+ }
241
+
242
+ def close(self):
243
+ """Cleanup."""
244
+ pass
245
+
246
+ def clone(self) -> 'OrigamiEnvironment':
247
+ """Return a deep copy for parallel evaluation (used in GRPO)."""
248
+ new_env = OrigamiEnvironment(
249
+ mode=self.mode,
250
+ max_steps=self.max_steps,
251
+ targets_dir=str(self.targets_dir),
252
+ use_semantic=self.use_semantic,
253
+ )
254
+ if self.paper is not None:
255
+ new_env.paper = copy.deepcopy(self.paper)
256
+ new_env.target = self.target
257
+ new_env.target_name = self.target_name
258
+ new_env.step_count = self.step_count
259
+ new_env.last_reward = self.last_reward
260
+ return new_env
env/graph.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import numpy as np
from typing import Optional

# Tolerance for classifying a coordinate as lying on the square's boundary.
BOUNDARY_TOL = 1e-9
# Tolerance for deduplicating two points into the same vertex.
VERTEX_TOL = 1e-9


class CreaseGraph:
    """
    Planar graph representing an origami crease pattern on a unit square.

    Vertices: points in [0,1]x[0,1], deduplicated by proximity.
    Edges: segments between vertices, labeled M (mountain), V (valley), or B (boundary).
    """

    def __init__(self):
        # id -> coordinates / id -> (v1, v2, assignment) / id -> incident edge ids
        self.vertices: dict[int, tuple[float, float]] = {}
        self.edges: dict[int, tuple[int, int, str]] = {}
        self.vertex_edges: dict[int, list[int]] = {}
        self._next_vertex_id: int = 0
        self._next_edge_id: int = 0

        # Seed the four corners (ids 0..3, counter-clockwise from origin).
        for corner in ((0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)):
            new_vid = self._next_vertex_id
            self._next_vertex_id += 1
            self.vertices[new_vid] = corner
            self.vertex_edges[new_vid] = []

        # Seed the four boundary edges of the square.
        for a, b in ((0, 1), (1, 2), (2, 3), (3, 0)):
            new_eid = self._next_edge_id
            self._next_edge_id += 1
            self.edges[new_eid] = (a, b, 'B')
            self.vertex_edges[a].append(new_eid)
            self.vertex_edges[b].append(new_eid)

    def add_vertex(self, x: float, y: float) -> int:
        """Return the id of an existing vertex within VERTEX_TOL, else create one."""
        for known_id, (kx, ky) in self.vertices.items():
            if abs(kx - x) < VERTEX_TOL and abs(ky - y) < VERTEX_TOL:
                return known_id
        new_vid = self._next_vertex_id
        self._next_vertex_id += 1
        self.vertices[new_vid] = (float(x), float(y))
        self.vertex_edges[new_vid] = []
        return new_vid

    def add_edge(self, v1_id: int, v2_id: int, assignment: str) -> int:
        """Add an undirected edge; return the existing id if the pair already exists."""
        wanted = frozenset((v1_id, v2_id))
        for eid, (a, b, _) in self.edges.items():
            if frozenset((a, b)) == wanted:
                return eid
        eid = self._next_edge_id
        self._next_edge_id += 1
        self.edges[eid] = (v1_id, v2_id, assignment)
        self.vertex_edges[v1_id].append(eid)
        self.vertex_edges[v2_id].append(eid)
        return eid

    def get_cyclic_edges(self, vertex_id: int) -> list[int]:
        """Incident edge ids sorted by angle (arctan2) toward the far endpoint."""
        cx, cy = self.vertices[vertex_id]

        def heading(eid: int) -> float:
            a, b, _ = self.edges[eid]
            other = b if a == vertex_id else a
            ox, oy = self.vertices[other]
            return float(np.arctan2(oy - cy, ox - cx))

        return sorted(self.vertex_edges[vertex_id], key=heading)

    def interior_vertices(self) -> list[int]:
        """Vertex ids strictly inside the square (not within BOUNDARY_TOL of an edge)."""
        return [
            vid
            for vid, (x, y) in self.vertices.items()
            if BOUNDARY_TOL < x < 1.0 - BOUNDARY_TOL
            and BOUNDARY_TOL < y < 1.0 - BOUNDARY_TOL
        ]

    def split_edge(self, edge_id: int, new_vertex_id: int) -> tuple[int, int]:
        """Replace edge_id by two edges meeting at new_vertex_id; return their ids.

        The two halves keep the original edge's assignment.
        """
        end_a, end_b, assignment = self.edges.pop(edge_id)
        for endpoint in (end_a, end_b):
            if edge_id in self.vertex_edges[endpoint]:
                self.vertex_edges[endpoint].remove(edge_id)

        halves: list[int] = []
        for va, vb in ((end_a, new_vertex_id), (new_vertex_id, end_b)):
            eid = self._next_edge_id
            self._next_edge_id += 1
            self.edges[eid] = (va, vb, assignment)
            self.vertex_edges[va].append(eid)
            self.vertex_edges[vb].append(eid)
            halves.append(eid)

        return (halves[0], halves[1])

    def crease_edges(self) -> list[int]:
        """Ids of all mountain/valley (non-boundary) edges."""
        return [eid for eid, (_, _, kind) in self.edges.items() if kind in ('M', 'V')]

    def boundary_midpoints(self) -> list[tuple[float, float]]:
        """Midpoints of all boundary ('B') edges."""
        mids = []
        for v1, v2, kind in self.edges.values():
            if kind == 'B':
                x1, y1 = self.vertices[v1]
                x2, y2 = self.vertices[v2]
                mids.append(((x1 + x2) / 2.0, (y1 + y2) / 2.0))
        return mids
env/paper_state.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from shapely.geometry import LineString, Point, Polygon
3
+ from typing import Optional
4
+ from .graph import CreaseGraph, VERTEX_TOL
5
+
6
+ UNIT_SQUARE_CORNERS = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
7
+
8
+ _UNIT_SQUARE = Polygon(UNIT_SQUARE_CORNERS)
9
+
10
+
11
class PaperState:
    """
    Represents the evolving crease pattern on a unit square [0,1]x[0,1].

    Wraps a CreaseGraph and records every accepted fold in fold_history.
    """

    def __init__(self):
        self.graph = CreaseGraph()
        # Chronological record of accepted add_crease() calls.
        self.fold_history: list[dict] = []

    def anchor_points(self) -> list[tuple[float, float]]:
        """All valid fold endpoints: the square corners plus every graph vertex.

        Deduplicated via dict keys, preserving insertion order.
        """
        points: dict[tuple[float, float], None] = {}
        for corner in UNIT_SQUARE_CORNERS:
            points[corner] = None
        for x, y in self.graph.vertices.values():
            points[(float(x), float(y))] = None
        return list(points.keys())

    def _is_anchor(self, pt: tuple[float, float]) -> bool:
        """True if pt coincides with some anchor point within VERTEX_TOL."""
        px, py = pt
        return any(
            abs(ax - px) < VERTEX_TOL and abs(ay - py) < VERTEX_TOL
            for ax, ay in self.anchor_points()
        )

    def _edge_exists(self, v1_id: int, v2_id: int) -> bool:
        """Check if an edge already exists between the two vertex IDs."""
        pair = frozenset((v1_id, v2_id))
        return any(
            frozenset((ev1, ev2)) == pair
            for ev1, ev2, _ in self.graph.edges.values()
        )

    def add_crease(self, p1: list, p2: list, assignment: str) -> dict:
        """
        Add a straight crease from p1 to p2 with assignment 'M' or 'V'.

        Existing edges are split at interior intersection points and the new
        crease is inserted as a chain of sub-edges through those points.

        Returns a dict with:
            valid: whether the crease was applied
            anchored: both endpoints coincide with existing anchor points
            new_vertices: coords of vertices created at intersections
            errors: machine-readable rejection reasons (empty on success)
            duplicate: some sub-segment already existed before insertion
        """
        def rejection(errs: list[str], anchored: bool = False) -> dict:
            return {
                'valid': False,
                'anchored': anchored,
                'new_vertices': [],
                'errors': errs,
                'duplicate': False,
            }

        if assignment not in ('M', 'V'):
            return rejection(['invalid_assignment'])

        p1 = (float(p1[0]), float(p1[1]))
        p2 = (float(p2[0]), float(p2[1]))

        anchored = self._is_anchor(p1) and self._is_anchor(p2)

        if np.hypot(p2[0] - p1[0], p2[1] - p1[1]) < VERTEX_TOL:
            return rejection(['zero_length'], anchored)

        new_line = LineString([p1, p2])

        # Reject segments that lie entirely outside the unit square.
        if not _UNIT_SQUARE.contains(new_line) and not _UNIT_SQUARE.boundary.contains(new_line):
            if new_line.intersection(_UNIT_SQUARE).is_empty:
                return rejection(['outside_bounds'], anchored)

        # Collect proper (non-endpoint) intersections with existing edges.
        intersection_points: list[tuple[float, float]] = []
        for ev1, ev2, _ in list(self.graph.edges.values()):
            ex1, ey1 = self.graph.vertices[ev1]
            ex2, ey2 = self.graph.vertices[ev2]
            inter = new_line.intersection(LineString([(ex1, ey1), (ex2, ey2)]))
            if inter.is_empty:
                continue
            if inter.geom_type == 'Point':
                ix, iy = inter.x, inter.y
                at_endpoint = (
                    (abs(ix - ex1) < VERTEX_TOL and abs(iy - ey1) < VERTEX_TOL)
                    or (abs(ix - ex2) < VERTEX_TOL and abs(iy - ey2) < VERTEX_TOL)
                )
                if not at_endpoint:
                    intersection_points.append((ix, iy))
            # MultiPoint or LineString intersections (collinear) are skipped

        # Insert each intersection vertex and split every edge through it.
        # BUGFIX: the split pass must run once per intersection point. In the
        # flattened form it sat after the loop, so only the last point's
        # vertex ever split edges, and `ix`/`iy`/`vid` were unbound whenever
        # there were no intersections at all.
        new_vertex_coords: list[tuple[float, float]] = []
        for ix, iy in intersection_points:
            before = set(self.graph.vertices.keys())
            vid = self.graph.add_vertex(ix, iy)
            if vid not in before:
                new_vertex_coords.append((ix, iy))

            pt = Point(ix, iy)
            for eid in list(self.graph.edges.keys()):
                if eid not in self.graph.edges:
                    continue  # removed by an earlier split in this pass
                ev1, ev2, _ = self.graph.edges[eid]
                ex1, ey1 = self.graph.vertices[ev1]
                ex2, ey2 = self.graph.vertices[ev2]
                seg = LineString([(ex1, ey1), (ex2, ey2)])
                if seg.distance(pt) < VERTEX_TOL and ev1 != vid and ev2 != vid:
                    self.graph.split_edge(eid, vid)

        # Walk the crease endpoint-to-endpoint through its intersection
        # points, ordered by distance from p1 (endpoints included).
        waypoints = [p1] + sorted(
            intersection_points,
            key=lambda q: np.hypot(q[0] - p1[0], q[1] - p1[1]),
        ) + [p2]
        waypoint_ids = [self.graph.add_vertex(wx, wy) for wx, wy in waypoints]

        # Duplicate check must happen BEFORE the new sub-edges are inserted.
        duplicate = any(
            self._edge_exists(waypoint_ids[i], waypoint_ids[i + 1])
            for i in range(len(waypoint_ids) - 1)
        )

        for wa, wb in zip(waypoint_ids, waypoint_ids[1:]):
            if wa != wb:
                self.graph.add_edge(wa, wb, assignment)

        self.fold_history.append({
            'p1': p1,
            'p2': p2,
            'assignment': assignment,
            'anchored': anchored,
            'new_vertices': new_vertex_coords,
        })

        return {
            'valid': True,
            'anchored': anchored,
            'new_vertices': new_vertex_coords,
            'errors': [],
            'duplicate': duplicate,
        }

    def crease_edges(self) -> list[dict]:
        """Return all M/V creases as coordinate dicts."""
        result = []
        for eid in self.graph.crease_edges():
            a, b, kind = self.graph.edges[eid]
            result.append({
                'v1': self.graph.vertices[a],
                'v2': self.graph.vertices[b],
                'assignment': kind,
            })
        return result
env/prompts.py ADDED
@@ -0,0 +1,284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import re
from typing import Optional

# Unit-square reference geometry used to classify anchor points in prompts.
_CORNERS = {(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)}
_BOUNDARY_X = {0.0, 1.0}
_BOUNDARY_Y = {0.0, 1.0}

# Semantic descriptions for known target patterns (used when target coords are hidden)
# NOTE(review): these keys do not match the .fold filenames under env/targets/
# (e.g. "half_horizontal", "diagonal_main") — verify how target_name is derived,
# otherwise get_semantic_description always falls through to its generic fallback.
DESCRIPTIONS: dict[str, str] = {
    "valley_fold_half": "Fold the paper in half with a single valley fold along the horizontal center line.",
    "mountain_fold_half": "Fold the paper in half with a single mountain fold along the vertical center line.",
    "letter_fold": "Create a letter fold (two parallel valley folds dividing the paper into thirds).",
    "diagonal_fold": "Fold the paper diagonally from one corner to the opposite corner.",
    "waterbomb_base": "Create a waterbomb base with two valley folds along both diagonals.",
}
17
+
18
+
19
def _is_corner(x: float, y: float) -> bool:
    """Return True when (x, y), rounded to 4 decimals, is a unit-square corner."""
    rounded = (round(x, 4), round(y, 4))
    return rounded in _CORNERS
21
+
22
+
23
def _is_boundary(x: float, y: float) -> bool:
    """Return True when the point lies on the square's edge.

    NOTE(review): relies on exact float equality against {0.0, 1.0};
    callers appear to pass pre-rounded coordinates — confirm.
    """
    return (x in _BOUNDARY_X) or (y in _BOUNDARY_Y)
25
+
26
+
27
def format_target_for_prompt(target: dict) -> str:
    """Render the target's M/V creases as one human-readable line per fold."""
    coords = target["vertices_coords"]
    described = []
    for (a, b), kind in zip(target["edges_vertices"], target["edges_assignment"]):
        if kind not in ("M", "V"):
            continue  # boundary edges are implicit; only creases are listed
        ax, ay = coords[a]
        bx, by = coords[b]
        name = "Mountain" if kind == "M" else "Valley"
        described.append(
            f"{name} fold: ({round(ax, 4)}, {round(ay, 4)}) -> ({round(bx, 4)}, {round(by, 4)})"
        )
    return "\n".join(described)
43
+
44
+
45
def get_semantic_description(target_name: str, target: dict) -> str:
    """Return a natural language description of the target crease pattern.

    Known names use the curated DESCRIPTIONS text; otherwise a summary is
    generated from the target's fold counts.
    """
    known = DESCRIPTIONS.get(target_name)
    if known is not None:
        return known

    # Fallback: describe the pattern by counting its fold assignments.
    assignments = target.get("edges_assignment", [])
    n_valley = sum(1 for a in assignments if a == "V")
    n_mountain = sum(1 for a in assignments if a == "M")
    if not (n_valley or n_mountain):
        return "Create an origami crease pattern with the given folds."

    pieces = []
    if n_valley:
        pieces.append(f"{n_valley} valley fold{'s' if n_valley != 1 else ''}")
    if n_mountain:
        pieces.append(f"{n_mountain} mountain fold{'s' if n_mountain != 1 else ''}")
    return f"Create an origami crease pattern with {', '.join(pieces)}."
62
+
63
+
64
def format_anchor_points(paper_state) -> str:
    """Group anchor points into corners / boundary / interior and format each group."""
    corner_pts: list[tuple[float, float]] = []
    edge_pts: list[tuple[float, float]] = []
    inner_pts: list[tuple[float, float]] = []

    for x, y in paper_state.anchor_points():
        rx, ry = round(x, 4), round(y, 4)
        if _is_corner(rx, ry):
            corner_pts.append((rx, ry))
        elif _is_boundary(rx, ry):
            edge_pts.append((rx, ry))
        else:
            inner_pts.append((rx, ry))

    def render(pts: list[tuple[float, float]]) -> str:
        return " ".join(f"({x},{y})" for x, y in pts)

    sections = []
    if corner_pts:
        sections.append(f" Corners: {render(corner_pts)}")
    if edge_pts:
        sections.append(f" Boundary pts: {render(edge_pts)}")
    if inner_pts:
        sections.append(f" Intersections: {render(inner_pts)}")
    return "\n".join(sections)
90
+
91
+
92
def format_crease_history(paper_state) -> str:
    """Render fold_history as a numbered list, or 'none' when empty."""
    if not paper_state.fold_history:
        return "none"

    rendered = []
    for idx, fold in enumerate(paper_state.fold_history, 1):
        kind = "Mountain" if fold["assignment"] == "M" else "Valley"
        (ax, ay), (bx, by) = fold["p1"], fold["p2"]
        rendered.append(
            f" {idx}. {kind} fold: "
            f"({round(ax, 4)}, {round(ay, 4)}) -> ({round(bx, 4)}, {round(by, 4)})"
        )
    return "\n".join(rendered)
107
+
108
+
109
def format_reward_feedback(reward: Optional[dict]) -> str:
    """Format a reward dict as 'key=value' pairs, well-known keys first."""
    if not reward:
        return "(no feedback yet)"

    preferred = ["kawasaki", "maekawa", "blb", "progress", "economy", "total"]
    ordered = [k for k in preferred if k in reward]
    ordered.extend(k for k in reward if k not in preferred)
    return " " + " ".join(f"{k}={reward[k]:.2f}" for k in ordered)
124
+
125
+
126
def code_as_policy_prompt(
    target: dict,
    max_folds: int = 8,
    semantic_description: Optional[str] = None,
) -> str:
    """Build the single-shot prompt asking the model for a full fold sequence.

    When semantic_description is given, the raw target coordinates are hidden
    and only the natural-language task is shown; otherwise the crease list is
    spelled out explicitly.
    """
    if semantic_description is not None:
        target_section = f"""TASK:
{semantic_description}

You are an origami designer. Given a description of what to fold, output a sequence of fold operations that build a crease pattern on a unit square [0,1]x[0,1]."""
    else:
        formatted_target = format_target_for_prompt(target)
        target_section = f"""TARGET CREASE PATTERN:
{formatted_target}

You are an origami designer. Generate a fold sequence for a unit square [0,1]x[0,1]."""

    # The rules below state the local flat-foldability theorems the reward
    # checks (Kawasaki, Maekawa, big-little-big).
    return f"""{target_section}

RULES (must hold at every interior vertex):
- Kawasaki: alternating sector angles sum equally (each half = 180 degrees)
- Maekawa: |mountain_count - valley_count| = 2
- Big-Little-Big: folds bounding the smallest sector must have opposite types (one M, one V)

INITIAL ANCHOR POINTS (valid fold endpoints — new ones appear when creases intersect):
Corners: (0.0,0.0) (1.0,0.0) (1.0,1.0) (0.0,1.0)
Midpoints: (0.0,0.5) (0.5,0.0) (1.0,0.5) (0.5,1.0)
Note: new anchor points are created at crease intersections.

Output at most {max_folds} folds. Both endpoints must be valid anchor points.
Output ONLY the JSON list, wrapped in <folds> tags:

<folds>
[
{{"instruction": "Describe the fold in plain English", "from": [x1, y1], "to": [x2, y2], "assignment": "V"}},
{{"instruction": "...", "from": [x1, y1], "to": [x2, y2], "assignment": "M"}}
]
</folds>"""
164
+
165
+
166
def step_level_prompt(
    target: dict,
    paper_state,
    step: int,
    max_steps: int,
    last_reward: Optional[dict] = None,
    semantic_description: Optional[str] = None,
) -> str:
    """Build the per-step prompt asking the model for the NEXT single crease.

    Includes the target (or its semantic description), the creases placed so
    far, the currently legal anchor points, and the last reward breakdown as
    feedback.
    """
    if semantic_description is not None:
        target_section = f"""TASK:
{semantic_description}

You are an origami designer. Given a description of what to fold, add the next crease to build the pattern."""
    else:
        formatted_target = format_target_for_prompt(target)
        target_section = f"""TARGET:
{formatted_target}

You are an origami designer building a crease pattern step by step."""

    formatted_history = format_crease_history(paper_state)
    formatted_anchors = format_anchor_points(paper_state)
    formatted_reward = format_reward_feedback(last_reward)

    return f"""{target_section}

CURRENT STATE (step {step} of {max_steps}):
Creases placed:
{formatted_history}

AVAILABLE ANCHOR POINTS:
{formatted_anchors}

LAST REWARD:
{formatted_reward}

Add the NEXT crease. Both endpoints must be listed anchor points above.
Output ONLY valid JSON (no extra text):
{{"instruction": "...", "from": [x1, y1], "to": [x2, y2], "assignment": "M" or "V"}}"""
205
+
206
+
207
def parse_fold_list(completion: str) -> list[dict]:
    """
    Extract and validate the JSON fold list wrapped in <folds>...</folds>.

    Returns folds normalized to {'from': [x,y], 'to': [x,y],
    'assignment': str, 'instruction': str} with float coordinates.
    Raises ValueError on any structural problem.
    """
    tag_match = re.search(r"<folds>(.*?)</folds>", completion, re.IGNORECASE | re.DOTALL)
    if tag_match is None:
        raise ValueError("No <folds>...</folds> tags found in completion")

    try:
        folds = json.loads(tag_match.group(1).strip())
    except json.JSONDecodeError as e:
        raise ValueError(f"Failed to parse JSON inside <folds> tags: {e}") from e

    if not isinstance(folds, list):
        raise ValueError(f"Expected a JSON list inside <folds> tags, got {type(folds).__name__}")

    def checked_point(value, idx: int, field: str) -> list[float]:
        # A point must be a 2-element JSON array of numbers.
        well_formed = (
            isinstance(value, list)
            and len(value) == 2
            and all(isinstance(c, (int, float)) for c in value)
        )
        if not well_formed:
            raise ValueError(f"Fold {idx} '{field}' must be a list of 2 numbers, got {value!r}")
        return [float(value[0]), float(value[1])]

    cleaned = []
    for idx, fold in enumerate(folds):
        if not isinstance(fold, dict):
            raise ValueError(f"Fold {idx} is not a dict: {fold!r}")

        for field in ("from", "to", "assignment"):
            if field not in fold:
                raise ValueError(f"Fold {idx} missing required field '{field}'")

        from_pt = checked_point(fold["from"], idx, "from")
        to_pt = checked_point(fold["to"], idx, "to")

        if not isinstance(fold["assignment"], str):
            raise ValueError(f"Fold {idx} 'assignment' must be a string")

        cleaned.append({
            "from": from_pt,
            "to": to_pt,
            "assignment": fold["assignment"],
            "instruction": fold.get("instruction", ""),
        })

    return cleaned
261
+
262
+
263
def parse_single_fold(completion: str) -> dict:
    """
    Extract the outermost {...} JSON object from a completion and check it
    carries 'from', 'to', and 'assignment'. Raises ValueError otherwise.
    """
    lo = completion.find("{")
    hi = completion.rfind("}")
    if lo == -1 or hi == -1 or hi <= lo:
        raise ValueError("No JSON object found in completion")

    try:
        fold = json.loads(completion[lo : hi + 1])
    except json.JSONDecodeError as e:
        raise ValueError(f"Failed to parse JSON from completion: {e}") from e

    if not isinstance(fold, dict):
        raise ValueError(f"Expected a JSON object, got {type(fold).__name__}")

    for field in ("from", "to", "assignment"):
        if field not in fold:
            raise ValueError(f"Missing required field '{field}' in fold JSON")

    return fold
env/rewards.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from .verifier import check_all_vertices, check_degree_sanity, geometric_crease_coverage
3
+ from .paper_state import PaperState
4
+
5
+
6
def load_target(target_path: str) -> dict:
    """Read a .fold (JSON) target file from disk and return its contents."""
    with open(target_path) as fh:
        return json.load(fh)
10
+
11
+
12
def target_crease_edges(target: dict) -> list[dict]:
    """
    Extract crease edges from a FOLD target dict as list of
    {'v1': (x1,y1), 'v2': (x2,y2), 'assignment': 'M'|'V'} dicts.
    """
    coords = target['vertices_coords']
    assignments = target['edges_assignment']
    return [
        {
            'v1': tuple(coords[a]),
            'v2': tuple(coords[b]),
            'assignment': assignments[i],
        }
        for i, (a, b) in enumerate(target['edges_vertices'])
        if assignments[i] in ('M', 'V')
    ]
28
+
29
+
30
def compute_reward(
    prev_state: PaperState,
    action_result: dict,
    new_state: PaperState,
    target: dict,
    step: int,
    max_steps: int,
) -> dict:
    """
    Compute the full reward dict for a fold action (lexicographically gated).

    Args:
        prev_state: PaperState BEFORE the action was applied
        action_result: {'valid': bool, 'anchored': bool, 'duplicate': bool, ...}
        new_state: PaperState AFTER the action was applied
        target: FOLD target dict
        step: current step index
        max_steps: maximum steps in episode

    Returns dict with keys:
        format, anchored, novelty, kawasaki, maekawa, blb, degree_sanity,
        progress, economy, assignment_accuracy, delta, regression,
        completion, efficiency, total
    """
    r = {}

    # GATE 1: Format — did the action parse and apply?
    # Invalid actions short-circuit: only 'format' and a -0.1 'total' are set.
    r['format'] = 1.0 if action_result.get('valid', False) else 0.0
    if not r['format']:
        r['total'] = -0.1
        return r

    # GATE 2: Structural sanity.
    # Un-anchored folds keep partial credit (0.3); duplicates earn no novelty.
    r['anchored'] = 1.0 if action_result.get('anchored', False) else 0.3
    r['novelty'] = 0.0 if action_result.get('duplicate', False) is True else 0.2

    # LEVEL 3: Local flat-foldability (Kawasaki / Maekawa / big-little-big).
    vertex_scores = check_all_vertices(new_state.graph)
    r['kawasaki'] = vertex_scores['kawasaki']
    r['maekawa'] = vertex_scores['maekawa']
    r['blb'] = vertex_scores['blb']
    r['degree_sanity'] = check_degree_sanity(new_state.graph)

    # LEVEL 4: Progress (absolute + delta against the pre-action state).
    t_edges = target_crease_edges(target)
    old_coverage, _, _ = geometric_crease_coverage(prev_state, t_edges)
    new_coverage, economy, assignment_accuracy = geometric_crease_coverage(new_state, t_edges)

    r['progress'] = new_coverage
    r['economy'] = economy
    r['assignment_accuracy'] = assignment_accuracy
    # delta rewards improvement; regression (<= 0) penalizes losing coverage.
    r['delta'] = max(0.0, new_coverage - old_coverage)
    r['regression'] = min(0.0, new_coverage - old_coverage)

    # LEVEL 5: Completion bonus — near-full coverage AND locally flat-foldable.
    all_valid = (
        r['kawasaki'] == 1.0
        and r['maekawa'] == 1.0
        and r['blb'] == 1.0
    )
    r['completion'] = 10.0 if (r['progress'] > 0.9 and all_valid) else 0.0

    # LEVEL 6: Efficiency — escalating step cost (from -0.01 up to -0.02).
    r['efficiency'] = -0.01 * (1 + step / max_steps)

    # Weighted total
    r['total'] = (
        0.05 * r['anchored']
        + 0.05 * r['novelty']
        + 0.06 * r['kawasaki']
        + 0.06 * r['maekawa']
        + 0.04 * r['blb']
        + 0.04 * r['degree_sanity']
        + 0.25 * r['progress']
        + 0.05 * r['economy']
        + 0.05 * r['assignment_accuracy']
        + 0.20 * r['delta']
        + 0.10 * r['regression']
        + r['completion']
        + r['efficiency']
    )
    return r
112
+
113
+
114
def compute_terminal_reward(
    state: PaperState,
    target: dict,
    max_steps: int,
) -> dict:
    """
    Score the final crease pattern after a complete fold sequence.

    Compares against a blank sheet (fresh PaperState) so the delta term
    reflects total coverage, and charges the maximum step cost by passing
    step = max_steps.
    """
    return compute_reward(
        prev_state=PaperState(),
        action_result={'valid': True, 'anchored': True, 'duplicate': False},
        new_state=state,
        target=target,
        step=max_steps,
        max_steps=max_steps,
    )
{planner → env/targets}/__init__.py RENAMED
File without changes
env/targets/accordion_3h.fold ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "vertices_coords": [
3
+ [0.0, 0.0],
4
+ [1.0, 0.0],
5
+ [1.0, 1.0],
6
+ [0.0, 1.0],
7
+ [0.0, 0.25],
8
+ [1.0, 0.25],
9
+ [0.0, 0.5],
10
+ [1.0, 0.5],
11
+ [0.0, 0.75],
12
+ [1.0, 0.75]
13
+ ],
14
+ "edges_vertices": [
15
+ [0, 1],
16
+ [1, 5],
17
+ [5, 7],
18
+ [7, 9],
19
+ [9, 2],
20
+ [2, 3],
21
+ [3, 8],
22
+ [8, 6],
23
+ [6, 4],
24
+ [4, 0],
25
+ [4, 5],
26
+ [6, 7],
27
+ [8, 9]
28
+ ],
29
+ "edges_assignment": [
30
+ "B",
31
+ "B",
32
+ "B",
33
+ "B",
34
+ "B",
35
+ "B",
36
+ "B",
37
+ "B",
38
+ "B",
39
+ "B",
40
+ "V",
41
+ "M",
42
+ "V"
43
+ ],
44
+ "edges_foldAngle": [
45
+ 0,
46
+ 0,
47
+ 0,
48
+ 0,
49
+ 0,
50
+ 0,
51
+ 0,
52
+ 0,
53
+ 0,
54
+ 0,
55
+ -180,
56
+ -180,
57
+ -180
58
+ ],
59
+ "faces_vertices": [
60
+ [0, 1, 5, 4],
61
+ [4, 5, 7, 6],
62
+ [6, 7, 9, 8],
63
+ [8, 9, 2, 3]
64
+ ],
65
+ "level": 3,
66
+ "description": "Three alternating horizontal folds at y=0.25 (valley), y=0.5 (mountain), y=0.75 (valley) forming an accordion"
67
+ }
env/targets/accordion_4h.fold ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "vertices_coords": [
3
+ [0.0, 0.0],
4
+ [1.0, 0.0],
5
+ [1.0, 1.0],
6
+ [0.0, 1.0],
7
+ [0.0, 0.2],
8
+ [1.0, 0.2],
9
+ [0.0, 0.4],
10
+ [1.0, 0.4],
11
+ [0.0, 0.6],
12
+ [1.0, 0.6],
13
+ [0.0, 0.8],
14
+ [1.0, 0.8]
15
+ ],
16
+ "edges_vertices": [
17
+ [0, 1],
18
+ [1, 5],
19
+ [5, 7],
20
+ [7, 9],
21
+ [9, 11],
22
+ [11, 2],
23
+ [2, 3],
24
+ [3, 10],
25
+ [10, 8],
26
+ [8, 6],
27
+ [6, 4],
28
+ [4, 0],
29
+ [4, 5],
30
+ [6, 7],
31
+ [8, 9],
32
+ [10, 11]
33
+ ],
34
+ "edges_assignment": [
35
+ "B",
36
+ "B",
37
+ "B",
38
+ "B",
39
+ "B",
40
+ "B",
41
+ "B",
42
+ "B",
43
+ "B",
44
+ "B",
45
+ "B",
46
+ "B",
47
+ "V",
48
+ "M",
49
+ "V",
50
+ "M"
51
+ ],
52
+ "edges_foldAngle": [
53
+ 0,
54
+ 0,
55
+ 0,
56
+ 0,
57
+ 0,
58
+ 0,
59
+ 0,
60
+ 0,
61
+ 0,
62
+ 0,
63
+ 0,
64
+ 0,
65
+ -180,
66
+ -180,
67
+ -180,
68
+ -180
69
+ ],
70
+ "faces_vertices": [
71
+ [0, 1, 5, 4],
72
+ [4, 5, 7, 6],
73
+ [6, 7, 9, 8],
74
+ [8, 9, 11, 10],
75
+ [10, 11, 2, 3]
76
+ ],
77
+ "level": 3,
78
+ "description": "Four alternating horizontal folds at y=0.2 (valley), y=0.4 (mountain), y=0.6 (valley), y=0.8 (mountain) forming an accordion"
79
+ }
env/targets/diagonal_anti.fold ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "vertices_coords": [
3
+ [0.0, 0.0],
4
+ [1.0, 0.0],
5
+ [1.0, 1.0],
6
+ [0.0, 1.0]
7
+ ],
8
+ "edges_vertices": [
9
+ [0, 1],
10
+ [1, 2],
11
+ [2, 3],
12
+ [3, 0],
13
+ [1, 3]
14
+ ],
15
+ "edges_assignment": [
16
+ "B",
17
+ "B",
18
+ "B",
19
+ "B",
20
+ "M"
21
+ ],
22
+ "edges_foldAngle": [
23
+ 0,
24
+ 0,
25
+ 0,
26
+ 0,
27
+ -180
28
+ ],
29
+ "faces_vertices": [
30
+ [0, 1, 3],
31
+ [1, 2, 3]
32
+ ],
33
+ "level": 1,
34
+ "description": "One mountain fold along the anti-diagonal from (1,0) to (0,1)"
35
+ }
env/targets/diagonal_main.fold ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "vertices_coords": [
3
+ [0.0, 0.0],
4
+ [1.0, 0.0],
5
+ [1.0, 1.0],
6
+ [0.0, 1.0]
7
+ ],
8
+ "edges_vertices": [
9
+ [0, 1],
10
+ [1, 2],
11
+ [2, 3],
12
+ [3, 0],
13
+ [0, 2]
14
+ ],
15
+ "edges_assignment": [
16
+ "B",
17
+ "B",
18
+ "B",
19
+ "B",
20
+ "V"
21
+ ],
22
+ "edges_foldAngle": [
23
+ 0,
24
+ 0,
25
+ 0,
26
+ 0,
27
+ -180
28
+ ],
29
+ "faces_vertices": [
30
+ [0, 1, 2],
31
+ [0, 2, 3]
32
+ ],
33
+ "level": 1,
34
+ "description": "One valley fold along the main diagonal from (0,0) to (1,1)"
35
+ }
env/targets/half_horizontal.fold ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "vertices_coords": [
3
+ [0.0, 0.0],
4
+ [1.0, 0.0],
5
+ [1.0, 1.0],
6
+ [0.0, 1.0],
7
+ [0.0, 0.5],
8
+ [1.0, 0.5]
9
+ ],
10
+ "edges_vertices": [
11
+ [0, 1],
12
+ [1, 5],
13
+ [5, 2],
14
+ [2, 3],
15
+ [3, 4],
16
+ [4, 0],
17
+ [4, 5]
18
+ ],
19
+ "edges_assignment": [
20
+ "B",
21
+ "B",
22
+ "B",
23
+ "B",
24
+ "B",
25
+ "B",
26
+ "V"
27
+ ],
28
+ "edges_foldAngle": [
29
+ 0,
30
+ 0,
31
+ 0,
32
+ 0,
33
+ 0,
34
+ 0,
35
+ -180
36
+ ],
37
+ "faces_vertices": [
38
+ [0, 1, 5, 4],
39
+ [4, 5, 2, 3]
40
+ ],
41
+ "level": 1,
42
+ "description": "One valley fold along y=0.5, folding the paper in half horizontally"
43
+ }
env/targets/half_vertical.fold ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "vertices_coords": [
3
+ [0.0, 0.0],
4
+ [1.0, 0.0],
5
+ [1.0, 1.0],
6
+ [0.0, 1.0],
7
+ [0.5, 0.0],
8
+ [0.5, 1.0]
9
+ ],
10
+ "edges_vertices": [
11
+ [0, 4],
12
+ [4, 1],
13
+ [1, 2],
14
+ [2, 5],
15
+ [5, 3],
16
+ [3, 0],
17
+ [4, 5]
18
+ ],
19
+ "edges_assignment": [
20
+ "B",
21
+ "B",
22
+ "B",
23
+ "B",
24
+ "B",
25
+ "B",
26
+ "M"
27
+ ],
28
+ "edges_foldAngle": [
29
+ 0,
30
+ 0,
31
+ 0,
32
+ 0,
33
+ 0,
34
+ 0,
35
+ -180
36
+ ],
37
+ "faces_vertices": [
38
+ [0, 4, 5, 3],
39
+ [4, 1, 2, 5]
40
+ ],
41
+ "level": 1,
42
+ "description": "One mountain fold along x=0.5, folding the paper in half vertically"
43
+ }
env/targets/thirds_h.fold ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "vertices_coords": [
3
+ [0.0, 0.0],
4
+ [1.0, 0.0],
5
+ [1.0, 1.0],
6
+ [0.0, 1.0],
7
+ [0.0, 0.3333333333333333],
8
+ [1.0, 0.3333333333333333],
9
+ [0.0, 0.6666666666666666],
10
+ [1.0, 0.6666666666666666]
11
+ ],
12
+ "edges_vertices": [
13
+ [0, 1],
14
+ [1, 5],
15
+ [5, 7],
16
+ [7, 2],
17
+ [2, 3],
18
+ [3, 6],
19
+ [6, 4],
20
+ [4, 0],
21
+ [4, 5],
22
+ [6, 7]
23
+ ],
24
+ "edges_assignment": [
25
+ "B",
26
+ "B",
27
+ "B",
28
+ "B",
29
+ "B",
30
+ "B",
31
+ "B",
32
+ "B",
33
+ "V",
34
+ "V"
35
+ ],
36
+ "edges_foldAngle": [
37
+ 0,
38
+ 0,
39
+ 0,
40
+ 0,
41
+ 0,
42
+ 0,
43
+ 0,
44
+ 0,
45
+ -180,
46
+ -180
47
+ ],
48
+ "faces_vertices": [
49
+ [0, 1, 5, 4],
50
+ [4, 5, 7, 6],
51
+ [6, 7, 2, 3]
52
+ ],
53
+ "level": 2,
54
+ "description": "Two parallel valley folds at y=1/3 and y=2/3, dividing the paper into horizontal thirds"
55
+ }
env/targets/thirds_v.fold ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "vertices_coords": [
3
+ [0.0, 0.0],
4
+ [1.0, 0.0],
5
+ [1.0, 1.0],
6
+ [0.0, 1.0],
7
+ [0.3333333333333333, 0.0],
8
+ [0.6666666666666666, 0.0],
9
+ [0.3333333333333333, 1.0],
10
+ [0.6666666666666666, 1.0]
11
+ ],
12
+ "edges_vertices": [
13
+ [0, 4],
14
+ [4, 5],
15
+ [5, 1],
16
+ [1, 2],
17
+ [2, 7],
18
+ [7, 6],
19
+ [6, 3],
20
+ [3, 0],
21
+ [4, 6],
22
+ [5, 7]
23
+ ],
24
+ "edges_assignment": [
25
+ "B",
26
+ "B",
27
+ "B",
28
+ "B",
29
+ "B",
30
+ "B",
31
+ "B",
32
+ "B",
33
+ "M",
34
+ "M"
35
+ ],
36
+ "edges_foldAngle": [
37
+ 0,
38
+ 0,
39
+ 0,
40
+ 0,
41
+ 0,
42
+ 0,
43
+ 0,
44
+ 0,
45
+ -180,
46
+ -180
47
+ ],
48
+ "faces_vertices": [
49
+ [0, 4, 6, 3],
50
+ [4, 5, 7, 6],
51
+ [5, 1, 2, 7]
52
+ ],
53
+ "level": 2,
54
+ "description": "Two parallel mountain folds at x=1/3 and x=2/3, dividing the paper into vertical thirds"
55
+ }
env/targets/validator.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Validates all .fold target files against origami theorems.
3
+ Run directly: python -m env.targets.validator
4
+ """
5
+ import json
6
+ import os
7
+ import sys
8
+ from pathlib import Path
9
+
10
+ from ..graph import CreaseGraph
11
+ from ..verifier import check_kawasaki_at_vertex, check_maekawa_at_vertex, check_blb_at_vertex
12
+
13
+
14
def build_graph_from_fold(fold_data: dict) -> CreaseGraph:
    """
    Rebuild a CreaseGraph from the arrays of a FOLD-format JSON dict.

    File-local vertex indices are remapped to the IDs the graph assigns,
    so the reconstructed graph is independent of the file's numbering.
    """
    graph = CreaseGraph()

    # File index -> graph-assigned vertex ID.
    index_to_id = {
        file_idx: graph.add_vertex(float(x), float(y))
        for file_idx, (x, y) in enumerate(fold_data['vertices_coords'])
    }

    # Boundary edges created at graph init may already exist; add_edge dedups.
    for i, (a_idx, b_idx) in enumerate(fold_data['edges_vertices']):
        graph.add_edge(
            index_to_id[a_idx],
            index_to_id[b_idx],
            fold_data['edges_assignment'][i],
        )

    return graph
39
+
40
+
41
def validate_target(fold_path: str) -> dict:
    """
    Validate one .fold target file against structural and origami checks.

    Returns a report dict:
        {'file': str, 'valid': bool, 'issues': list[str], 'interior_vertices': int}
    where 'interior_vertices' is -1 when required fields are missing.
    """
    with open(fold_path) as fh:
        data = json.load(fh)

    issues: list[str] = []

    # Structural prerequisites — bail out early when any field is absent.
    for field in ('vertices_coords', 'edges_vertices', 'edges_assignment', 'edges_foldAngle'):
        if field not in data:
            issues.append(f"Missing field: {field}")
    if issues:
        return {
            'file': os.path.basename(fold_path),
            'valid': False,
            'issues': issues,
            'interior_vertices': -1,
        }

    edge_count = len(data['edges_vertices'])
    if len(data['edges_assignment']) != edge_count:
        issues.append("edges_assignment length mismatch")
    if len(data['edges_foldAngle']) != edge_count:
        issues.append("edges_foldAngle length mismatch")

    # Theorem checks at every interior vertex of the reconstructed graph.
    graph = build_graph_from_fold(data)
    interior = graph.interior_vertices()
    for vid in interior:
        kaw_ok, alt_sum = check_kawasaki_at_vertex(vid, graph)
        if not kaw_ok:
            issues.append(f"Kawasaki violated at vertex {vid} (alt_sum={alt_sum:.6f})")

        if not check_maekawa_at_vertex(vid, graph):
            issues.append(f"Maekawa violated at vertex {vid}")

        blb = check_blb_at_vertex(vid, graph)
        if blb:
            issues.append(f"BLB violated at vertex {vid}: {blb}")

    return {
        'file': os.path.basename(fold_path),
        'valid': not issues,
        'issues': issues,
        'interior_vertices': len(interior),
    }
88
+
89
+
90
+ def validate_all(targets_dir: str = None) -> bool:
91
+ """Validate all .fold files in the targets directory. Returns True if all pass."""
92
+ if targets_dir is None:
93
+ targets_dir = Path(__file__).parent
94
+
95
+ all_pass = True
96
+ fold_files = sorted(Path(targets_dir).glob('*.fold'))
97
+
98
+ if not fold_files:
99
+ print("No .fold files found")
100
+ return False
101
+
102
+ for fold_path in fold_files:
103
+ result = validate_target(str(fold_path))
104
+ status = "OK" if result['valid'] else "FAIL"
105
+ n_interior = result['interior_vertices']
106
+ print(f" [{status}] {result['file']} — {n_interior} interior vertices")
107
+ if result['issues']:
108
+ for issue in result['issues']:
109
+ print(f" ! {issue}")
110
+ if not result['valid']:
111
+ all_pass = False
112
+
113
+ return all_pass
114
+
115
+
116
if __name__ == '__main__':
    # CLI entry point: exit code 0 when every target validates, 1 otherwise.
    print("Validating targets...")
    sys.exit(0 if validate_all() else 1)
env/targets/validator_check.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os
from pathlib import Path

# Standalone structural sanity pass over the .fold targets beside this script.
targets_dir = Path(__file__).parent

for fname in os.listdir(targets_dir):
    if not fname.endswith(".fold"):
        continue

    d = json.loads((targets_dir / fname).read_text())

    n_v = len(d["vertices_coords"])
    n_e = len(d["edges_vertices"])

    # The per-edge arrays must run parallel to edges_vertices.
    assert len(d["edges_assignment"]) == n_e, f"{fname}: assignment length mismatch"
    assert len(d["edges_foldAngle"]) == n_e, f"{fname}: foldAngle length mismatch"

    # Every vertex reference (from edges and faces) must be in range.
    for e in d["edges_vertices"]:
        assert e[0] < n_v and e[1] < n_v, f"{fname}: edge references invalid vertex"
    for face in d["faces_vertices"]:
        for vi in face:
            assert vi < n_v, f"{fname}: face references invalid vertex"

    creases = [i for i, a in enumerate(d["edges_assignment"]) if a in ('M', 'V')]
    print(f"{fname}: {n_v} vertices, {n_e} edges, {len(creases)} creases, level={d.get('level','?')} OK")
env/verifier.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from .graph import CreaseGraph
3
+ from .paper_state import PaperState
4
+
5
+
6
def _compute_sector_angles(vertex_id: int, graph: CreaseGraph) -> list[float]:
    """Sector angles between consecutive (CCW-ordered) edges at a vertex."""
    ordered_edges = graph.get_cyclic_edges(vertex_id)
    vx, vy = graph.vertices[vertex_id]

    # Absolute direction of each incident edge, pointing away from the vertex.
    directions = []
    for eid in ordered_edges:
        ev1, ev2, _ = graph.edges[eid]
        other = ev2 if ev1 == vertex_id else ev1
        ox, oy = graph.vertices[other]
        directions.append(np.arctan2(oy - vy, ox - vx))

    # CCW gap from each direction to the next, wrapped into [0, 2π).
    count = len(directions)
    sectors = []
    for i, theta in enumerate(directions):
        gap = directions[(i + 1) % count] - theta
        if gap < 0:
            gap += 2 * np.pi
        if gap > 2 * np.pi:
            gap -= 2 * np.pi
        sectors.append(gap)

    return sectors
29
+
30
+
31
def check_kawasaki_at_vertex(vertex_id: int, graph: CreaseGraph) -> tuple[bool, float]:
    """
    Kawasaki-Justin check at one vertex.

    At a flat-foldable interior vertex with 2n creases, the alternating sum
    of consecutive sector angles is zero (equivalently, odd- and even-indexed
    sectors each sum to π).

    Returns (satisfied, |alternating sum|):
      * degree < 4  -> (True, 0.0)  — not an active interior fold vertex yet
      * odd degree  -> (False, inf) — flat folding is impossible
    """
    degree = len(graph.get_cyclic_edges(vertex_id))

    if degree % 2:
        return (False, float('inf'))
    if degree < 4:
        return (True, 0.0)

    alternating = 0.0
    for i, sector in enumerate(_compute_sector_angles(vertex_id, graph)):
        alternating += sector if i % 2 == 0 else -sector

    residual = abs(alternating)
    return (residual < 1e-9, residual)
55
+
56
+
57
def check_maekawa_at_vertex(vertex_id: int, graph: CreaseGraph) -> bool:
    """
    Maekawa-Justin check at one vertex: |M - V| == 2, counting only
    mountain/valley creases (boundary 'B' edges are excluded).

    Vertices with fewer than 4 fold edges are treated as trivially valid
    (not yet an active interior fold vertex).
    """
    assignments = [
        graph.edges[eid][2]
        for eid in graph.vertex_edges[vertex_id]
        if graph.edges[eid][2] in ('M', 'V')
    ]

    if len(assignments) < 4:
        return True

    mountains = assignments.count('M')
    valleys = assignments.count('V')
    return abs(mountains - valleys) == 2
78
+
79
+
80
def check_blb_at_vertex(vertex_id: int, graph: CreaseGraph) -> list[tuple[int, int]]:
    """
    Big-Little-Big lemma check at one vertex.

    Whenever a sector angle is a strict local minimum relative to both
    neighboring sectors, the two fold edges bounding that sector must carry
    opposite M/V assignments.

    Returns the offending (edge_a_id, edge_b_id) pairs; an empty list means
    no violations (including all vertices of degree < 4).
    """
    ordered_edges = graph.get_cyclic_edges(vertex_id)
    degree = len(ordered_edges)
    if degree < 4:
        return []

    sectors = _compute_sector_angles(vertex_id, graph)
    bad_pairs: list[tuple[int, int]] = []

    for i, sector in enumerate(sectors):
        strict_min = (
            sector < sectors[(i - 1) % degree]
            and sector < sectors[(i + 1) % degree]
        )
        if not strict_min:
            continue

        left_edge = ordered_edges[i]
        right_edge = ordered_edges[(i + 1) % degree]
        left_assign = graph.edges[left_edge][2]
        right_assign = graph.edges[right_edge][2]

        # Only crease/crease pairs are constrained; boundary edges are exempt.
        if (left_assign in ('M', 'V') and right_assign in ('M', 'V')
                and left_assign == right_assign):
            bad_pairs.append((left_edge, right_edge))

    return bad_pairs
115
+
116
+
117
+ def _angle_diff(a1: float, a2: float) -> float:
118
+ """Minimum angle difference between two directed lines (considering 180° symmetry)."""
119
+ diff = abs(a1 - a2) % np.pi
120
+ return min(diff, np.pi - diff)
121
+
122
+
123
def geometric_crease_coverage(
    state: PaperState,
    target_edges: list[dict],
    tol_pos: float = 0.05,
    tol_angle_deg: float = 5.0,
) -> tuple[float, float, float]:
    """
    Score how closely the current crease pattern matches a target pattern.

    Each target edge is compared against the paper's crease edges by midpoint
    distance and (undirected) direction. The first current edge within both
    tolerances claims the target: a full point when the M/V assignment also
    matches, half a point otherwise.

    Args:
        state: current paper state (provides crease_edges())
        target_edges: [{'v1': (x1, y1), 'v2': (x2, y2), 'assignment': 'M'|'V'}, ...]
        tol_pos: midpoint distance tolerance
        tol_angle_deg: direction tolerance, in degrees

    Returns:
        (coverage, economy, assignment_accuracy), each in [0, 1]:
        coverage — weighted fraction of target creases matched;
        economy — 1.0 minus the excess-crease penalty (1.0 = no excess);
        assignment_accuracy — of positionally matched creases, the fraction
        with correct M/V (1.0 when nothing matched, vacuously).
    """
    present = state.crease_edges()
    angle_tol = np.deg2rad(tol_angle_deg)

    score = 0.0
    matched_positions = 0
    matched_assignments = 0

    for tgt in target_edges:
        tx1, ty1 = tgt['v1']
        tx2, ty2 = tgt['v2']
        tgt_mid_x = (tx1 + tx2) / 2.0
        tgt_mid_y = (ty1 + ty2) / 2.0
        tgt_dir = np.arctan2(ty2 - ty1, tx2 - tx1)
        tgt_assign = tgt.get('assignment', 'M')

        for cur in present:
            cx1, cy1 = cur['v1']
            cx2, cy2 = cur['v2']
            mid_dist = np.hypot(
                (cx1 + cx2) / 2.0 - tgt_mid_x,
                (cy1 + cy2) / 2.0 - tgt_mid_y,
            )
            cur_dir = np.arctan2(cy2 - cy1, cx2 - cx1)

            if mid_dist <= tol_pos and _angle_diff(cur_dir, tgt_dir) <= angle_tol:
                matched_positions += 1
                if cur.get('assignment', 'M') == tgt_assign:
                    score += 1.0
                    matched_assignments += 1
                else:
                    score += 0.5
                break  # each target consumes at most one current edge

    denom = max(len(target_edges), 1)
    coverage = score / denom
    economy = max(0.0, 1.0 - max(0, len(present) - len(target_edges)) / denom)
    accuracy = (
        matched_assignments / matched_positions if matched_positions > 0 else 1.0
    )
    return (coverage, economy, accuracy)
187
+
188
+
189
def check_degree_sanity(graph: CreaseGraph) -> float:
    """
    Fraction of interior vertices with even degree — a necessary condition
    for flat-foldability.

    Returns a value in [0, 1]; 1.0 means every interior vertex has even
    degree, and 1.0 is also returned vacuously when there are no interior
    vertices at all.
    """
    interior = graph.interior_vertices()
    if not interior:
        return 1.0
    even = [vid for vid in interior if len(graph.vertex_edges[vid]) % 2 == 0]
    return len(even) / len(interior)
207
+
208
+
209
def check_all_vertices(graph: CreaseGraph) -> dict:
    """
    Run every vertex-level theorem check over all interior vertices.

    Returns:
        {
          'kawasaki': float,         # fraction passing Kawasaki [0, 1]
          'maekawa': float,          # fraction passing Maekawa [0, 1]
          'blb': float,              # fraction free of BLB violations [0, 1]
          'n_interior': int,         # number of interior vertices examined
          'per_vertex': list[dict],  # detailed result per vertex
        }
    All three fractions are 1.0 (vacuously) when there are no interior
    vertices.
    """
    interior = graph.interior_vertices()

    if not interior:
        return {
            'kawasaki': 1.0,
            'maekawa': 1.0,
            'blb': 1.0,
            'n_interior': 0,
            'per_vertex': [],
        }

    details = []
    counts = {'kawasaki': 0, 'maekawa': 0, 'blb': 0}

    for vid in interior:
        kaw_ok, kaw_err = check_kawasaki_at_vertex(vid, graph)
        mae_ok = check_maekawa_at_vertex(vid, graph)
        blb_bad = check_blb_at_vertex(vid, graph)

        counts['kawasaki'] += int(kaw_ok)
        counts['maekawa'] += int(mae_ok)
        counts['blb'] += int(len(blb_bad) == 0)

        details.append({
            'vertex_id': vid,
            'kawasaki_ok': kaw_ok,
            'kawasaki_error': kaw_err,
            'maekawa_ok': mae_ok,
            'blb_violations': blb_bad,
        })

    total = len(interior)
    return {
        'kawasaki': counts['kawasaki'] / total,
        'maekawa': counts['maekawa'] / total,
        'blb': counts['blb'] / total,
        'n_interior': total,
        'per_vertex': details,
    }
openenv.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ spec_version: 1
2
+ name: optigami
3
+ type: space
4
+ runtime: fastapi
5
+ app: openenv_server.app:app
6
+ port: 8000
openenv_runtime/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """OpenEnv integration runtime for Optigami."""
2
+
3
+ from .environment import OpenEnvOrigamiEnvironment
4
+ from .models import OrigamiAction, OrigamiObservation, OrigamiState
5
+
6
+ __all__ = [
7
+ "OpenEnvOrigamiEnvironment",
8
+ "OrigamiAction",
9
+ "OrigamiObservation",
10
+ "OrigamiState",
11
+ ]
openenv_runtime/environment.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ OpenEnv adapter for Optigami.
3
+
4
+ Thin wrapper around env.environment.OrigamiEnvironment that adapts it to the
5
+ OpenEnv protocol (Action/Observation types).
6
+ """
7
+ from env.environment import OrigamiEnvironment as _Env
8
+
9
+ from .models import OrigamiAction, OrigamiObservation
10
+
11
+
12
class OpenEnvOrigamiEnvironment:
    """
    OpenEnv-compatible wrapper for env.environment.OrigamiEnvironment.

    Translates the inner environment's dict-based reset/step API into
    OpenEnv Action/Observation models (and back).
    """

    def __init__(self, mode: str = "step", max_steps: int = 8, targets_dir=None):
        self._env = _Env(mode=mode, max_steps=max_steps, targets_dir=targets_dir)

    def reset(self, target_name=None, **kwargs):
        """Start a new episode; returns the initial observation (no reward)."""
        return self._obs_dict_to_model(
            self._env.reset(target_name=target_name), reward=None, done=False
        )

    def step(self, action: OrigamiAction, **kwargs):
        """Apply one fold action and return the resulting observation."""
        raw_action = {
            "from": action.from_point,
            "to": action.to_point,
            "assignment": action.assignment,
        }
        obs_dict, reward, done, info = self._env.step(raw_action)
        # The inner env may return a reward breakdown dict; surface the scalar.
        total = reward.get("total", 0.0) if isinstance(reward, dict) else reward
        return self._obs_dict_to_model(obs_dict, reward=total, done=done)

    def _obs_dict_to_model(self, obs_dict: dict, reward=None, done=False) -> OrigamiObservation:
        """Map the inner env's observation dict onto the OpenEnv model."""
        return OrigamiObservation(
            prompt=obs_dict.get("prompt", ""),
            target_name=obs_dict.get("target_name", ""),
            step=obs_dict.get("step", 0),
            paper_fold_json=obs_dict.get("paper_fold_json", {}),
            reward=reward,
            done=done,
        )

    def state(self):
        """Expose the inner environment's server-side state."""
        return self._env.state()

    def close(self):
        """Release the inner environment's resources."""
        self._env.close()
51
+
52
+
53
+ __all__ = ["OpenEnvOrigamiEnvironment"]
openenv_runtime/models.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ OpenEnv Pydantic models for the env/ stack.
3
+
4
+ Matches the env/environment data shape: observations with prompt, target_name,
5
+ step, paper_fold_json; actions as fold dicts with from/to/assignment.
6
+ """
7
+ from typing import Optional
8
+
9
+ from pydantic import ConfigDict, Field
10
+
11
+ from openenv.core.env_server.types import Action, Observation, State
12
+
13
+
14
class OrigamiAction(Action):
    """One fold operation — from_point, to_point, assignment.

    The wire format uses the JSON keys "from" and "to" (via field aliases);
    populate_by_name also accepts the Python attribute names.
    """

    # Allow construction by attribute name as well as by alias.
    model_config = ConfigDict(populate_by_name=True)

    from_point: list[float] = Field(
        alias="from",
        description="[x, y] start point of the crease",
    )
    to_point: list[float] = Field(
        alias="to",
        description="[x, y] end point of the crease",
    )
    assignment: str = Field(
        description="'M' (mountain) or 'V' (valley)",
    )
30
+
31
+
32
class OrigamiObservation(Observation):
    """Observation from env.environment — prompt, target, step, paper state.

    Field names mirror the observation dict returned by the inner
    environment's reset()/step() (see openenv_runtime.environment).
    """

    prompt: str = Field(default="", description="LLM prompt for the current step")
    target_name: str = Field(default="", description="Name of the target (.fold stem)")
    step: int = Field(default=0, ge=0, description="Current step index")
    paper_fold_json: dict = Field(
        default_factory=dict,
        description="Graph edges (crease pattern state)",
    )
42
+
43
+
44
class OrigamiState(State):
    """Server-side episode state.

    NOTE(review): 'mode' presumably mirrors the environment's mode argument
    ('step' or 'code_as_policy') — confirm against env.environment.
    """

    paper: dict = Field(default_factory=dict, description="Paper state")
    target: Optional[str] = Field(default=None, description="Target name")
    step: int = Field(default=0, ge=0, description="Step count")
    mode: str = Field(default="step", description="'step' or 'code_as_policy'")
51
+
52
+
53
+ __all__ = ["OrigamiAction", "OrigamiObservation", "OrigamiState"]
openenv_server/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """OpenEnv FastAPI app package."""
openenv_server/app.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import json
5
+ from pathlib import Path
6
+
7
+ import numpy as np
8
+ from fastapi import HTTPException, WebSocket
9
+ from fastapi.responses import HTMLResponse, JSONResponse
10
+ from fastapi.staticfiles import StaticFiles
11
+
12
+ from openenv.core.env_server.http_server import create_app
13
+
14
+ from env.environment import OrigamiEnvironment
15
+ from openenv_runtime.environment import OpenEnvOrigamiEnvironment
16
+ from openenv_runtime.models import OrigamiAction, OrigamiObservation
17
+ from server.training_broadcast import TrainingBroadcastServer
18
+
19
+
20
+ # ---------------------------------------------------------------------------
21
+ # Numpy-safe JSON response
22
+ # ---------------------------------------------------------------------------
23
+
24
+ def _np_default(obj):
25
+ if isinstance(obj, np.bool_):
26
+ return bool(obj)
27
+ if isinstance(obj, np.integer):
28
+ return int(obj)
29
+ if isinstance(obj, np.floating):
30
+ return float(obj)
31
+ if isinstance(obj, np.ndarray):
32
+ return obj.tolist()
33
+ raise TypeError(f"Not serializable: {type(obj)}")
34
+
35
+
36
class NumpyJSONResponse(JSONResponse):
    """JSONResponse that serializes NumPy scalars/arrays via _np_default."""

    def render(self, content) -> bytes:
        payload = json.dumps(content, default=_np_default)
        return payload.encode("utf-8")
39
+
40
+
41
+ # ---------------------------------------------------------------------------
42
+ # Episode registry for replay
43
+ # ---------------------------------------------------------------------------
44
+
45
+ _episode_registry: dict[str, dict] = {}
46
+
47
+
48
+ # ---------------------------------------------------------------------------
49
+ # OpenEnv app + training broadcast server
50
+ # ---------------------------------------------------------------------------
51
+
52
# OpenEnv FastAPI application; each session gets a fresh step-mode environment.
app = create_app(
    env=lambda: OpenEnvOrigamiEnvironment(mode="step"),
    action_cls=OrigamiAction,
    observation_cls=OrigamiObservation,
    env_name="optigami",
)

# Shared broadcaster used by the /ws/training spectator WebSocket below.
broadcast = TrainingBroadcastServer()
60
+
61
+
62
def _ensure_broadcast_loop():
    """Set broadcast loop on first use (replaces deprecated on_event('startup'))."""
    # NOTE: reaches into the broadcaster's private _loop attribute.
    loop = broadcast._loop
    if loop is not None and not loop.is_closed():
        return
    try:
        broadcast._loop = asyncio.get_running_loop()
    except RuntimeError:
        # Not called from inside a running event loop; leave the loop unset.
        pass
69
+
70
+
71
@app.middleware("http")
async def _set_broadcast_loop(request, call_next):
    """Ensure broadcast has an event loop before handling each request."""
    _ensure_broadcast_loop()
    response = await call_next(request)
    return response
76
+
77
+
78
+ # ---------------------------------------------------------------------------
79
+ # Health endpoint
80
+ # ---------------------------------------------------------------------------
81
+
82
@app.get("/health", include_in_schema=True)
async def health():
    """Liveness probe; always reports ok."""
    payload = {"status": "ok"}
    return payload
85
+
86
+
87
+ # ---------------------------------------------------------------------------
88
+ # Episode replay endpoint
89
+ # ---------------------------------------------------------------------------
90
+
91
@app.get("/episode/replay/{ep_id}", include_in_schema=True, response_class=NumpyJSONResponse)
async def replay_episode(ep_id: str):
    """Return a previously recorded episode by ID, or 404 when unknown."""
    episode = _episode_registry.get(ep_id)
    if episode is None:
        raise HTTPException(status_code=404, detail="Episode not found")
    return NumpyJSONResponse(episode)
96
+
97
+
98
+ # ---------------------------------------------------------------------------
99
+ # Training grid viewer WebSocket
100
+ # ---------------------------------------------------------------------------
101
+
102
+ @app.websocket("/ws/training")
103
+ async def training_ws(websocket: WebSocket):
104
+ """Read-only spectator WebSocket for the training grid viewer."""
105
+ _ensure_broadcast_loop()
106
+ await broadcast.connect_spectator(websocket)
107
+
108
+
109
+ # ---------------------------------------------------------------------------
110
+ # Helper: extract crease folds from .fold target
111
+ # ---------------------------------------------------------------------------
112
+
113
+ def _target_to_folds(target: dict) -> list[dict]:
114
+ """Extract crease folds from a target .fold dict (edges with M or V)."""
115
+ verts = target.get("vertices_coords", [])
116
+ edges_v = target.get("edges_vertices", [])
117
+ edges_a = target.get("edges_assignment", [])
118
+ folds = []
119
+ for (v1, v2), ass in zip(edges_v, edges_a):
120
+ if ass in ("M", "V") and v1 < len(verts) and v2 < len(verts):
121
+ p1 = verts[v1]
122
+ p2 = verts[v2]
123
+ folds.append({"from": p1, "to": p2, "assignment": ass})
124
+ return folds
125
+
126
+
127
def _graph_state_to_fold(paper_dict: dict) -> dict:
    """Convert internal graph state dict to FOLD-format arrays for the frontend.

    Input format (from env.state()['paper']):
        vertices: {id: (x, y), ...}
        edges: {id: (v1_id, v2_id, assignment), ...}

    Output format (FOLD):
        vertices_coords: [[x, y, 0], ...]
        edges_vertices: [[i, j], ...]
        edges_assignment: ['M'|'V'|'B', ...]
        faces_vertices: [[i, j, k], ...] (Delaunay triangulation for 3D)

    Returns {} when there are no vertices.
    """
    raw_verts = paper_dict.get("vertices", {})
    raw_edges = paper_dict.get("edges", {})
    if not raw_verts:
        return {}

    def _numeric(key):
        # IDs may arrive as ints or numeric strings; sort them numerically.
        return int(key) if isinstance(key, (int, str)) else key

    ordered_vids = sorted(raw_verts, key=_numeric)
    index_of = {vid: i for i, vid in enumerate(ordered_vids)}

    vertices_coords = [
        [float(raw_verts[vid][0]), float(raw_verts[vid][1]), 0.0]
        for vid in ordered_vids
    ]

    edges_vertices = []
    edges_assignment = []
    for eid in sorted(raw_edges, key=_numeric):
        a, b, mv = raw_edges[eid]
        # Skip edges referencing vertices we did not receive.
        if a in index_of and b in index_of:
            edges_vertices.append([index_of[a], index_of[b]])
            edges_assignment.append(mv)

    return {
        "vertices_coords": vertices_coords,
        "edges_vertices": edges_vertices,
        "edges_assignment": edges_assignment,
        "faces_vertices": _triangulate_vertices(vertices_coords),
    }
+ }
169
+
170
+
171
+ def _triangulate_vertices(vertices_coords: list) -> list:
172
+ """Delaunay triangulate the 2D vertex set for 3D mesh rendering."""
173
+ if len(vertices_coords) < 3:
174
+ return []
175
+ try:
176
+ from scipy.spatial import Delaunay
177
+ pts = np.array([[v[0], v[1]] for v in vertices_coords])
178
+ tri = Delaunay(pts)
179
+ return tri.simplices.tolist()
180
+ except Exception:
181
+ return [[0, 1, 2], [0, 2, 3]] if len(vertices_coords) >= 4 else []
182
+
183
+
184
+ # ---------------------------------------------------------------------------
185
+ # API routes — must be registered BEFORE the StaticFiles catch-all mount
186
+ # ---------------------------------------------------------------------------
187
+
188
@app.get("/targets", include_in_schema=True, response_class=NumpyJSONResponse)
def get_targets():
    """Return available target names and metadata from env/targets/*.fold."""
    env = OrigamiEnvironment()
    result: dict[str, dict] = {}
    for name in env.available_targets():
        # NOTE: reads the environment's private parsed-target cache.
        meta = env._targets.get(name, {})
        level = meta.get("level", 1)
        crease_count = sum(
            1 for a in meta.get("edges_assignment", []) if a in ("M", "V")
        )
        result[name] = {
            "name": name,
            "level": level,
            "description": meta.get("description", ""),
            "n_creases": crease_count,
            "difficulty": level,
            "material": "paper",
        }
    return NumpyJSONResponse(result)
205
+
206
+
207
@app.get("/episode/demo", include_in_schema=True, response_class=NumpyJSONResponse)
def demo_episode(target: str = "half_horizontal"):
    """Return a pre-solved demo episode for the given .fold target.

    Replays the target's own crease list through a fresh step-mode
    environment and records the paper state plus reward metrics after
    every fold.
    """
    env = OrigamiEnvironment(mode="step")
    targets = env.available_targets()
    # Fall back to the first available target when the requested one is unknown.
    if target not in targets:
        target = targets[0] if targets else "half_horizontal"

    t = env._targets.get(target, {})  # NOTE: private cache of parsed .fold dicts
    folds = _target_to_folds(t)

    obs_dict = env.reset(target_name=target)
    steps: list[dict] = []

    for i, fold_dict in enumerate(folds):
        obs_dict, reward, done, info = env.step(fold_dict)
        graph = env.paper.graph
        # Snapshot the full crease graph (all edges, not only M/V) for rendering.
        all_edges = {eid: (v1, v2, a) for eid, (v1, v2, a) in graph.edges.items()}
        fold_state = _graph_state_to_fold({
            "vertices": dict(graph.vertices),
            "edges": all_edges,
        })

        steps.append({
            "step": i + 1,
            "fold": fold_dict,
            "paper_state": fold_state,
            # Reward may be a component breakdown dict or a bare scalar.
            "metrics": reward if isinstance(reward, dict) else {"total": reward},
            "done": done,
        })
        if done:
            break

    return NumpyJSONResponse({
        "task_name": target,
        "task": {"name": target, "level": t.get("level", 1), "description": t.get("description", "")},
        "target_crease": t,
        "steps": steps,
        "final_metrics": steps[-1]["metrics"] if steps else {},
    })
247
+
248
+
249
+ # ---------------------------------------------------------------------------
250
+ # Static file serving — must come LAST so API routes take priority
251
+ # ---------------------------------------------------------------------------
252
+
253
# Resolve the frontend build output relative to the repository root.
_BUILD_DIR = Path(__file__).resolve().parent.parent / "build"

if _BUILD_DIR.exists():
    # html=True lets StaticFiles serve index.html for directory requests.
    app.mount("/", StaticFiles(directory=str(_BUILD_DIR), html=True), name="renderer")
else:
    # No frontend build in this container: explain instead of returning 404.
    @app.get("/", include_in_schema=False)
    def missing_renderer_build() -> HTMLResponse:
        return HTMLResponse(
            """
            <html><body style="font-family: sans-serif; margin: 24px;">
            <h3>Renderer build not found</h3>
            <p>No <code>build/</code> directory is present in the container.</p>
            <p>OpenEnv API docs are available at <a href="/docs">/docs</a>.</p>
            </body></html>
            """,
            status_code=200,
        )
package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
package.json CHANGED
@@ -24,6 +24,7 @@
24
  "react-app/jest"
25
  ]
26
  },
 
27
  "browserslist": {
28
  "production": [
29
  ">0.2%",
 
24
  "react-app/jest"
25
  ]
26
  },
27
+ "proxy": "http://localhost:7860",
28
  "browserslist": {
29
  "production": [
30
  ">0.2%",
planner/decomposer.py DELETED
@@ -1,284 +0,0 @@
1
- """
2
- Task decomposer: breaks a parsed instruction into sequential sub-goals
3
- with concrete fold operations on a unit square.
4
- """
5
-
6
- from __future__ import annotations
7
-
8
- import copy
9
- from planner.knowledge import (
10
- ORIGAMI_MODELS,
11
- ORIGAMI_BASES,
12
- FOLD_OPERATIONS,
13
- get_model_steps,
14
- get_base_steps,
15
- )
16
-
17
-
18
- # ---------------------------------------------------------------------------
19
- # Internal helpers
20
- # ---------------------------------------------------------------------------
21
-
22
- def _step_to_fold_operation(step: dict) -> dict:
23
- """
24
- Convert a knowledge-base step dict into the engine's fold operation format:
25
- {"type": ..., "line": {"start": [...], "end": [...]}, "angle": ...}
26
- """
27
- op = {
28
- "type": step["type"],
29
- "line": copy.deepcopy(step["line"]),
30
- "angle": step.get("angle", 180),
31
- }
32
- if "layer_select" in step:
33
- op["layer_select"] = step["layer_select"]
34
- return op
35
-
36
-
37
- def _expected_state_after_fold(fold_type: str, prev_state: dict | None) -> dict:
38
- """
39
- Produce a lightweight expected-state dict describing what the paper
40
- should look like after a fold. This is intentionally approximate --
41
- the real simulation engine computes exact geometry.
42
- """
43
- state = dict(prev_state or {"layers": 1, "shape": "square", "phase": "flat"})
44
- if fold_type in ("valley", "mountain"):
45
- state["layers"] = state.get("layers", 1) * 2
46
- elif fold_type == "petal":
47
- state["shape"] = "narrow_diamond"
48
- elif fold_type == "squash":
49
- state["shape"] = "diamond"
50
- elif fold_type == "reverse_inside":
51
- state["shape"] = "pointed_flap_reversed"
52
- elif fold_type == "inflate":
53
- state["phase"] = "3d"
54
- elif fold_type == "turn_over":
55
- state["flipped"] = not state.get("flipped", False)
56
- elif fold_type == "unfold":
57
- # Layers don't literally halve on every unfold, but this is a hint
58
- state["layers"] = max(1, state.get("layers", 1) // 2)
59
- return state
60
-
61
-
62
- def _validation_for_fold(fold_type: str) -> dict:
63
- """Return a simple validation dict for a step."""
64
- checks: dict = {"flat_foldable": True}
65
- if fold_type in ("valley", "mountain"):
66
- checks["kawasaki_check"] = True
67
- checks["maekawa_check"] = True
68
- if fold_type == "inflate":
69
- checks["is_3d"] = True
70
- checks["flat_foldable"] = False
71
- return checks
72
-
73
-
74
- # ---------------------------------------------------------------------------
75
- # Known-model decomposition
76
- # ---------------------------------------------------------------------------
77
-
78
def _decompose_known_model(parsed: dict) -> list[dict]:
    """Decompose a known model into sub-goal steps.

    Looks the model up in ORIGAMI_MODELS; unknown names fall back to the
    generic free-fold plan. Each knowledge-base step becomes one sub-goal,
    with the model's base requirement attached only to the first step and a
    running approximate paper state threaded through the sequence.
    """
    name: str = parsed["model_name"]
    info = ORIGAMI_MODELS.get(name)
    if info is None:
        # Not actually in the knowledge base -- use the generic plan.
        return _decompose_free_fold(parsed)

    base_name = info.get("base")
    state: dict = {"layers": 1, "shape": "square", "phase": "flat"}
    goals: list[dict] = []

    for idx, step in enumerate(get_model_steps(name)):
        operation = _step_to_fold_operation(step)
        state = _expected_state_after_fold(step["type"], state)
        goals.append({
            "step_number": idx + 1,
            "description": step.get("description", f"Step {idx + 1}"),
            "base_required": base_name if idx == 0 else None,
            "fold_operations": [operation],
            "expected_state": dict(state),
            "validation": _validation_for_fold(step["type"]),
        })

    return goals
104
-
105
-
106
- # ---------------------------------------------------------------------------
107
- # Packing / optimization decomposition
108
- # ---------------------------------------------------------------------------
109
-
110
- def _decompose_packing(parsed: dict) -> list[dict]:
111
- """
112
- Decompose an optimize_packing task into sub-goals.
113
- Returns a Miura-ori-style fold plan on a unit square.
114
- """
115
- w = parsed["dimensions"]["width"]
116
- h = parsed["dimensions"]["height"]
117
- material = parsed["material"]
118
- constraints = parsed.get("constraints", {})
119
- max_folds = constraints.get("max_folds", 20)
120
-
121
- sub_goals: list[dict] = []
122
- step_num = 0
123
-
124
- # Horizontal valley/mountain pleats (zigzag in Y)
125
- n_horizontal = min(4, max_folds // 4)
126
- spacing_y = 1.0 / (n_horizontal + 1)
127
- for i in range(n_horizontal):
128
- step_num += 1
129
- y = spacing_y * (i + 1)
130
- fold_type = "valley" if i % 2 == 0 else "mountain"
131
- sub_goals.append({
132
- "step_number": step_num,
133
- "description": f"Horizontal {fold_type} fold at y={y:.3f} (pleat {i + 1}/{n_horizontal})",
134
- "base_required": None,
135
- "fold_operations": [{
136
- "type": fold_type,
137
- "line": {"start": [0.0, y], "end": [1.0, y]},
138
- "angle": 180,
139
- "layer_select": "all",
140
- }],
141
- "expected_state": {"layers": i + 2, "phase": "flat", "pattern": "miura_horizontal"},
142
- "validation": {"flat_foldable": True, "kawasaki_check": True},
143
- })
144
-
145
- # Vertical zigzag valley/mountain pleats (Miura-ori angle offsets)
146
- n_vertical = min(4, (max_folds - n_horizontal) // 2)
147
- spacing_x = 1.0 / (n_vertical + 1)
148
- for i in range(n_vertical):
149
- step_num += 1
150
- x = spacing_x * (i + 1)
151
- fold_type = "valley" if i % 2 == 0 else "mountain"
152
- # Miura-ori: alternate slight angle offset to create parallelogram cells
153
- angle_offset = 0.02 * (1 if i % 2 == 0 else -1)
154
- sub_goals.append({
155
- "step_number": step_num,
156
- "description": f"Vertical {fold_type} fold at x={x:.3f} (Miura-ori column {i + 1}/{n_vertical})",
157
- "base_required": None,
158
- "fold_operations": [{
159
- "type": fold_type,
160
- "line": {"start": [x, 0.0 + angle_offset], "end": [x, 1.0 - angle_offset]},
161
- "angle": 180,
162
- "layer_select": "all",
163
- }],
164
- "expected_state": {
165
- "layers": (n_horizontal + 1) * (i + 2),
166
- "phase": "flat",
167
- "pattern": "miura_complete" if i == n_vertical - 1 else "miura_partial",
168
- },
169
- "validation": {"flat_foldable": True, "kawasaki_check": True, "maekawa_check": True},
170
- })
171
-
172
- # Final collapse
173
- step_num += 1
174
- sub_goals.append({
175
- "step_number": step_num,
176
- "description": "Collapse all creases simultaneously into compact Miura-ori stack",
177
- "base_required": None,
178
- "fold_operations": [{
179
- "type": "valley",
180
- "line": {"start": [0.0, 0.5], "end": [1.0, 0.5]},
181
- "angle": 180,
182
- "layer_select": "all",
183
- }],
184
- "expected_state": {
185
- "layers": (n_horizontal + 1) * (n_vertical + 1),
186
- "phase": "compact",
187
- "pattern": "miura_ori",
188
- },
189
- "validation": {
190
- "flat_foldable": True,
191
- "check_bounding_box": constraints.get("target_box"),
192
- "check_deployable": constraints.get("must_deploy", False),
193
- },
194
- })
195
-
196
- return sub_goals
197
-
198
-
199
- # ---------------------------------------------------------------------------
200
- # Free-fold / unknown model decomposition
201
- # ---------------------------------------------------------------------------
202
-
203
- def _decompose_free_fold(parsed: dict) -> list[dict]:
204
- """
205
- Generic decomposition for an unknown model or free-form folding task.
206
- Returns a minimal plan that an LLM can expand upon.
207
- """
208
- return [
209
- {
210
- "step_number": 1,
211
- "description": "Create reference creases (diagonals and midlines)",
212
- "base_required": None,
213
- "fold_operations": [
214
- {"type": "valley", "line": {"start": [0.0, 0.0], "end": [1.0, 1.0]}, "angle": 180},
215
- {"type": "unfold", "line": {"start": [0.0, 0.0], "end": [1.0, 1.0]}, "angle": 0},
216
- {"type": "valley", "line": {"start": [1.0, 0.0], "end": [0.0, 1.0]}, "angle": 180},
217
- {"type": "unfold", "line": {"start": [1.0, 0.0], "end": [0.0, 1.0]}, "angle": 0},
218
- {"type": "valley", "line": {"start": [0.0, 0.5], "end": [1.0, 0.5]}, "angle": 180},
219
- {"type": "unfold", "line": {"start": [0.0, 0.5], "end": [1.0, 0.5]}, "angle": 0},
220
- ],
221
- "expected_state": {"layers": 1, "shape": "square", "phase": "creased"},
222
- "validation": {"flat_foldable": True},
223
- },
224
- {
225
- "step_number": 2,
226
- "description": "Collapse into a base form using reference creases",
227
- "base_required": "preliminary_base",
228
- "fold_operations": [
229
- {"type": "valley", "line": {"start": [0.0, 0.0], "end": [1.0, 1.0]}, "angle": 180, "layer_select": "all"},
230
- ],
231
- "expected_state": {"layers": 4, "shape": "diamond", "phase": "base"},
232
- "validation": {"flat_foldable": True},
233
- },
234
- {
235
- "step_number": 3,
236
- "description": "Shape the model with additional folds (LLM determines specifics)",
237
- "base_required": None,
238
- "fold_operations": [], # Left empty for LLM to fill
239
- "expected_state": {"phase": "shaped"},
240
- "validation": {"flat_foldable": True},
241
- },
242
- ]
243
-
244
-
245
- # ---------------------------------------------------------------------------
246
- # Fold-pattern decomposition
247
- # ---------------------------------------------------------------------------
248
-
249
def _decompose_pattern(parsed: dict) -> list[dict]:
    """Decompose a tessellation/pattern task.

    Currently identical to the packing decomposition, which already emits a
    Miura-ori crease pattern.
    """
    return _decompose_packing(parsed)
253
-
254
-
255
- # ---------------------------------------------------------------------------
256
- # Public API
257
- # ---------------------------------------------------------------------------
258
-
259
def decompose_task(parsed: dict) -> list[dict]:
    """
    Decompose a parsed instruction into sequential sub-goals.

    Args:
        parsed: Output of parse_instruction()

    Returns:
        List of sub-goal dicts, each with:
            - step_number: int
            - description: str
            - base_required: str or None
            - fold_operations: list[dict] (engine-format fold dicts)
            - expected_state: dict
            - validation: dict
    """
    intent = parsed.get("intent", "free_fold")

    # fold_model only routes to the known-model path when a name is present;
    # otherwise it falls through to the generic free-fold plan.
    if intent == "fold_model" and parsed.get("model_name"):
        return _decompose_known_model(parsed)

    dispatch = {
        "optimize_packing": _decompose_packing,
        "fold_pattern": _decompose_pattern,
    }
    return dispatch.get(intent, _decompose_free_fold)(parsed)