Spaces:
Running
Running
new-environment
#5
by
ianalin123 - opened
This view is limited to 50 files because it contains too many changes.
See the raw diff here.
- .dockerignore +18 -0
- .gitignore +2 -3
- Dockerfile +27 -0
- README.md +21 -67
- build/asset-manifest.json +13 -0
- build/favicon.ico +0 -0
- build/index.html +1 -0
- build/logo192.png +0 -0
- build/logo512.png +0 -0
- build/manifest.json +25 -0
- build/robots.txt +3 -0
- build/static/css/main.edb517bf.css +2 -0
- build/static/css/main.edb517bf.css.map +1 -0
- build/static/js/main.7e6cf91b.js +0 -0
- build/static/js/main.7e6cf91b.js.LICENSE.txt +49 -0
- build/static/js/main.7e6cf91b.js.map +0 -0
- docs/optigami_handoff.md +767 -0
- engine/fold_engine.py +42 -0
- engine/metrics.py +127 -0
- engine/paper.py +38 -1
- engine/physics.py +260 -0
- engine/validation.py +22 -0
- env/__init__.py +0 -0
- env/environment.py +243 -0
- env/graph.py +117 -0
- env/paper_state.py +150 -0
- env/prompts.py +235 -0
- env/rewards.py +93 -0
- env/targets/__init__.py +0 -0
- env/targets/accordion_3h.fold +67 -0
- env/targets/accordion_4h.fold +79 -0
- env/targets/diagonal_anti.fold +35 -0
- env/targets/diagonal_main.fold +35 -0
- env/targets/half_horizontal.fold +43 -0
- env/targets/half_vertical.fold +43 -0
- env/targets/thirds_h.fold +55 -0
- env/targets/thirds_v.fold +55 -0
- env/targets/validator.py +119 -0
- env/targets/validator_check.py +19 -0
- env/verifier.py +221 -0
- openenv.yaml +6 -0
- openenv_runtime/__init__.py +11 -0
- openenv_runtime/environment.py +183 -0
- openenv_runtime/models.py +63 -0
- openenv_server/__init__.py +1 -0
- openenv_server/app.py +150 -0
- package-lock.json +0 -0
- plans/implementation_plan.md +485 -0
- pyproject.toml +20 -0
- requirements.txt +5 -0
.dockerignore
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.git
|
| 2 |
+
.DS_Store
|
| 3 |
+
__pycache__
|
| 4 |
+
*.pyc
|
| 5 |
+
*.pyo
|
| 6 |
+
.pytest_cache
|
| 7 |
+
.claude
|
| 8 |
+
node_modules
|
| 9 |
+
build
|
| 10 |
+
research
|
| 11 |
+
docs
|
| 12 |
+
plans
|
| 13 |
+
RESEARCH_NOTES.md
|
| 14 |
+
trainer
|
| 15 |
+
train.py
|
| 16 |
+
sim
|
| 17 |
+
viz
|
| 18 |
+
planner
|
.gitignore
CHANGED
|
@@ -8,9 +8,6 @@
|
|
| 8 |
# testing
|
| 9 |
/coverage
|
| 10 |
|
| 11 |
-
# production
|
| 12 |
-
/build
|
| 13 |
-
|
| 14 |
# misc
|
| 15 |
.DS_Store
|
| 16 |
.env.local
|
|
@@ -28,3 +25,5 @@ __pycache__/
|
|
| 28 |
|
| 29 |
# Reference repos (not pushed to HF)
|
| 30 |
.reference/
|
|
|
|
|
|
|
|
|
| 8 |
# testing
|
| 9 |
/coverage
|
| 10 |
|
|
|
|
|
|
|
|
|
|
| 11 |
# misc
|
| 12 |
.DS_Store
|
| 13 |
.env.local
|
|
|
|
| 25 |
|
| 26 |
# Reference repos (not pushed to HF)
|
| 27 |
.reference/
|
| 28 |
+
*.pyc
|
| 29 |
+
__pycache__/
|
Dockerfile
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM node:20-alpine AS web-builder
|
| 2 |
+
|
| 3 |
+
WORKDIR /web
|
| 4 |
+
COPY package*.json ./
|
| 5 |
+
RUN npm ci --no-audit --no-fund
|
| 6 |
+
COPY public ./public
|
| 7 |
+
COPY src ./src
|
| 8 |
+
RUN npm run build
|
| 9 |
+
|
| 10 |
+
FROM ghcr.io/meta-pytorch/openenv-base:latest
|
| 11 |
+
|
| 12 |
+
WORKDIR /app
|
| 13 |
+
|
| 14 |
+
# Install Python deps first for better layer caching
|
| 15 |
+
COPY requirements.txt ./
|
| 16 |
+
RUN pip install --no-cache-dir -r requirements.txt \
|
| 17 |
+
&& pip install --no-cache-dir "openenv-core[core]>=0.2.1"
|
| 18 |
+
|
| 19 |
+
# Copy application source
|
| 20 |
+
COPY . /app
|
| 21 |
+
|
| 22 |
+
# Overlay the compiled React frontend
|
| 23 |
+
COPY --from=web-builder /web/build /app/build
|
| 24 |
+
|
| 25 |
+
EXPOSE 8000
|
| 26 |
+
|
| 27 |
+
CMD ["uvicorn", "openenv_server.app:app", "--host", "0.0.0.0", "--port", "8000"]
|
README.md
CHANGED
|
@@ -3,81 +3,35 @@ title: Optigami
|
|
| 3 |
emoji: π
|
| 4 |
colorFrom: indigo
|
| 5 |
colorTo: red
|
| 6 |
-
sdk:
|
| 7 |
pinned: false
|
| 8 |
-
|
| 9 |
-
app_file: build/index.html
|
| 10 |
license: mit
|
| 11 |
-
short_description:
|
| 12 |
---
|
| 13 |
|
| 14 |
-
#
|
| 15 |
|
| 16 |
-
|
|
|
|
|
|
|
|
|
|
| 17 |
|
| 18 |
-
|
|
|
|
|
|
|
| 19 |
|
| 20 |
-
|
| 21 |
|
| 22 |
-
|
|
|
|
|
|
|
| 23 |
|
| 24 |
-
|
| 25 |
-
Open [http://localhost:3000](http://localhost:3000) to view it in your browser.
|
| 26 |
|
| 27 |
-
|
| 28 |
-
|
|
|
|
|
|
|
| 29 |
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
Launches the test runner in the interactive watch mode.\
|
| 33 |
-
See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
|
| 34 |
-
|
| 35 |
-
### `npm run build`
|
| 36 |
-
|
| 37 |
-
Builds the app for production to the `build` folder.\
|
| 38 |
-
It correctly bundles React in production mode and optimizes the build for the best performance.
|
| 39 |
-
|
| 40 |
-
The build is minified and the filenames include the hashes.\
|
| 41 |
-
Your app is ready to be deployed!
|
| 42 |
-
|
| 43 |
-
See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.
|
| 44 |
-
|
| 45 |
-
### `npm run eject`
|
| 46 |
-
|
| 47 |
-
**Note: this is a one-way operation. Once you `eject`, you can't go back!**
|
| 48 |
-
|
| 49 |
-
If you aren't satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project.
|
| 50 |
-
|
| 51 |
-
Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you're on your own.
|
| 52 |
-
|
| 53 |
-
You don't have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn't feel obligated to use this feature. However we understand that this tool wouldn't be useful if you couldn't customize it when you are ready for it.
|
| 54 |
-
|
| 55 |
-
## Learn More
|
| 56 |
-
|
| 57 |
-
You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).
|
| 58 |
-
|
| 59 |
-
To learn React, check out the [React documentation](https://reactjs.org/).
|
| 60 |
-
|
| 61 |
-
### Code Splitting
|
| 62 |
-
|
| 63 |
-
This section has moved here: [https://facebook.github.io/create-react-app/docs/code-splitting](https://facebook.github.io/create-react-app/docs/code-splitting)
|
| 64 |
-
|
| 65 |
-
### Analyzing the Bundle Size
|
| 66 |
-
|
| 67 |
-
This section has moved here: [https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size](https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size)
|
| 68 |
-
|
| 69 |
-
### Making a Progressive Web App
|
| 70 |
-
|
| 71 |
-
This section has moved here: [https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app](https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app)
|
| 72 |
-
|
| 73 |
-
### Advanced Configuration
|
| 74 |
-
|
| 75 |
-
This section has moved here: [https://facebook.github.io/create-react-app/docs/advanced-configuration](https://facebook.github.io/create-react-app/docs/advanced-configuration)
|
| 76 |
-
|
| 77 |
-
### Deployment
|
| 78 |
-
|
| 79 |
-
This section has moved here: [https://facebook.github.io/create-react-app/docs/deployment](https://facebook.github.io/create-react-app/docs/deployment)
|
| 80 |
-
|
| 81 |
-
### `npm run build` fails to minify
|
| 82 |
-
|
| 83 |
-
This section has moved here: [https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify](https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify)
|
|
|
|
| 3 |
emoji: π
|
| 4 |
colorFrom: indigo
|
| 5 |
colorTo: red
|
| 6 |
+
sdk: docker
|
| 7 |
pinned: false
|
| 8 |
+
app_port: 8000
|
|
|
|
| 9 |
license: mit
|
| 10 |
+
short_description: OpenEnv origami environment and demo
|
| 11 |
---
|
| 12 |
|
| 13 |
+
# Optigami
|
| 14 |
|
| 15 |
+
OpenEnv-compatible origami RL environment with:
|
| 16 |
+
- environment + reward checks in `env/`
|
| 17 |
+
- OpenEnv server adapter in `openenv_runtime/` and `openenv_server/`
|
| 18 |
+
- Dockerized deployment for Hugging Face Spaces
|
| 19 |
|
| 20 |
+
Entry point: `openenv_server.app:app`
|
| 21 |
+
Manifest: `openenv.yaml`
|
| 22 |
+
Container: `Dockerfile`
|
| 23 |
|
| 24 |
+
## Local Run
|
| 25 |
|
| 26 |
+
```bash
|
| 27 |
+
uvicorn openenv_server.app:app --host 0.0.0.0 --port 8000
|
| 28 |
+
```
|
| 29 |
|
| 30 |
+
## Frontend (optional local React demo)
|
|
|
|
| 31 |
|
| 32 |
+
```bash
|
| 33 |
+
npm install
|
| 34 |
+
npm start
|
| 35 |
+
```
|
| 36 |
|
| 37 |
+
This serves the dashboard against the FastAPI API.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
build/asset-manifest.json
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"files": {
|
| 3 |
+
"main.css": "/static/css/main.edb517bf.css",
|
| 4 |
+
"main.js": "/static/js/main.7e6cf91b.js",
|
| 5 |
+
"index.html": "/index.html",
|
| 6 |
+
"main.edb517bf.css.map": "/static/css/main.edb517bf.css.map",
|
| 7 |
+
"main.7e6cf91b.js.map": "/static/js/main.7e6cf91b.js.map"
|
| 8 |
+
},
|
| 9 |
+
"entrypoints": [
|
| 10 |
+
"static/css/main.edb517bf.css",
|
| 11 |
+
"static/js/main.7e6cf91b.js"
|
| 12 |
+
]
|
| 13 |
+
}
|
build/favicon.ico
ADDED
|
|
build/index.html
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="/favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="Web site created using create-react-app"/><link rel="apple-touch-icon" href="/logo192.png"/><link rel="manifest" href="/manifest.json"/><title>React App</title><script defer="defer" src="/static/js/main.7e6cf91b.js"></script><link href="/static/css/main.edb517bf.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
build/logo192.png
ADDED
|
build/logo512.png
ADDED
|
build/manifest.json
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"short_name": "React App",
|
| 3 |
+
"name": "Create React App Sample",
|
| 4 |
+
"icons": [
|
| 5 |
+
{
|
| 6 |
+
"src": "favicon.ico",
|
| 7 |
+
"sizes": "64x64 32x32 24x24 16x16",
|
| 8 |
+
"type": "image/x-icon"
|
| 9 |
+
},
|
| 10 |
+
{
|
| 11 |
+
"src": "logo192.png",
|
| 12 |
+
"type": "image/png",
|
| 13 |
+
"sizes": "192x192"
|
| 14 |
+
},
|
| 15 |
+
{
|
| 16 |
+
"src": "logo512.png",
|
| 17 |
+
"type": "image/png",
|
| 18 |
+
"sizes": "512x512"
|
| 19 |
+
}
|
| 20 |
+
],
|
| 21 |
+
"start_url": ".",
|
| 22 |
+
"display": "standalone",
|
| 23 |
+
"theme_color": "#000000",
|
| 24 |
+
"background_color": "#ffffff"
|
| 25 |
+
}
|
build/robots.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# https://www.robotstxt.org/robotstxt.html
|
| 2 |
+
User-agent: *
|
| 3 |
+
Disallow:
|
build/static/css/main.edb517bf.css
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@import url(https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@300;400;500;700&family=IBM+Plex+Mono:wght@300;400;500&display=swap);*,:after,:before{box-sizing:border-box;margin:0;padding:0}body{-webkit-font-smoothing:antialiased;background:#0d0d14;color:#f8fafc;font-family:IBM Plex Mono,monospace;font-size:13px;line-height:1.5;overflow-x:hidden}::-webkit-scrollbar{height:4px;width:4px}::-webkit-scrollbar-track{background:#0d0d14}::-webkit-scrollbar-thumb{background:#2a2a3a}::-webkit-scrollbar-thumb:hover{background:#3a3a5a}:root{--bg:#0d0d14;--surface:#13131d;--surface-2:#1a1a2e;--paper-white:#fafaf5;--paper-edge:#2a2a3a;--mountain:#f59e0b;--valley:#38bdf8;--target-ghost:#7c3aed33;--target-ghost-stroke:#7c3aed73;--validity:#22d3ee;--progress:#22c55e;--economy:#a78bfa;--text-primary:#f8fafc;--text-dim:#64748b;--border:#2a2a3a;--border-bright:#3a3a5a;--font-display:"JetBrains Mono",monospace;--font-mono:"IBM Plex Mono",monospace}.app{background:#0d0d14;background:var(--bg);display:flex;flex-direction:column;height:100vh;overflow:hidden}.app-header{align-items:center;background:#13131d;background:var(--surface);border-bottom:1px solid #2a2a3a;border-bottom:1px solid var(--border);display:flex;flex-shrink:0;gap:24px;height:48px;padding:0 20px;z-index:10}.app-title{color:#f8fafc;color:var(--text-primary);font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:14px;font-weight:700;letter-spacing:.12em;white-space:nowrap}.app-title .title-accent{color:#f59e0b;color:var(--mountain)}.header-sep{background:#2a2a3a;background:var(--border);flex-shrink:0;height:24px;width:1px}.header-right{gap:16px;margin-left:auto}.api-status,.header-right{align-items:center;display:flex}.api-status{font-family:JetBrains 
Mono,monospace;font-family:var(--font-display);font-size:11px;gap:6px;letter-spacing:.08em}.api-status-dot{background:#64748b;background:var(--text-dim);border-radius:50%;height:6px;width:6px}.api-status-dot.ok{background:#22c55e;background:var(--progress);box-shadow:0 0 6px #22c55e;box-shadow:0 0 6px var(--progress)}.api-status-dot.err{background:#ef4444;box-shadow:0 0 6px #ef4444}.app-body{display:grid;flex:1 1;grid-template-columns:1fr 280px;overflow:hidden}.app-left{border-right:1px solid #2a2a3a;border-right:1px solid var(--border)}.app-left,.app-right{display:flex;flex-direction:column;overflow:hidden}.app-right{background:#13131d;background:var(--surface)}.canvas-row{border-bottom:1px solid #2a2a3a;border-bottom:1px solid var(--border);display:flex;flex-shrink:0;gap:0;overflow-x:auto;padding:16px}.canvas-wrap{display:flex;flex:1 1;flex-direction:column;gap:8px;min-width:280px}.canvas-wrap+.canvas-wrap{margin-left:16px}.canvas-label{color:#64748b;color:var(--text-dim);font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:10px;font-weight:500;letter-spacing:.14em;text-transform:uppercase}.canvas-svg{background:#fafaf5;background:var(--paper-white);display:block}.canvas-3d{background:linear-gradient(180deg,#1a1a2e,#0f101a);border:1px solid #2a2a3a;border:1px solid var(--border);display:block}.canvas-label-row{align-items:center;display:flex;gap:10px;justify-content:space-between}.fold-mode-toggle{background:#13131d;background:var(--surface);border:1px solid #2a2a3a;border:1px solid var(--border);display:inline-flex}.fold-mode-btn{background:#0000;border:none;color:#64748b;color:var(--text-dim);cursor:pointer;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:9px;letter-spacing:.08em;padding:3px 7px}.fold-mode-btn+.fold-mode-btn{border-left:1px solid #2a2a3a;border-left:1px solid 
var(--border)}.fold-mode-btn.active{background:#1f2538;color:#f8fafc;color:var(--text-primary)}.step-feed-section{display:flex;flex:1 1;flex-direction:column;overflow:hidden}.section-header{border-bottom:1px solid #2a2a3a;border-bottom:1px solid var(--border);color:#64748b;color:var(--text-dim);flex-shrink:0;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:10px;font-weight:500;letter-spacing:.14em;padding:8px 16px;text-transform:uppercase}.step-feed{flex:1 1;overflow-y:auto;padding:4px 0}.step-entry{border-bottom:1px solid #2a2a3a;border-bottom:1px solid var(--border);cursor:default;display:flex;flex-direction:column;gap:2px;padding:8px 16px;transition:background .1s}.step-entry:hover{background:#13131d;background:var(--surface)}.step-entry.active{background:#1a1a2e;background:var(--surface-2);border-left:2px solid #38bdf8;border-left:2px solid var(--valley);padding-left:14px}.step-entry-top{align-items:center;display:flex;gap:8px}.step-num{color:#64748b;color:var(--text-dim);flex-shrink:0;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:10px;font-weight:700;width:24px}.step-instruction{color:#f8fafc;color:var(--text-primary);flex:1 1;font-size:12px}.assign-badge{flex-shrink:0;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:10px;font-weight:700;line-height:1.4;padding:1px 5px}.assign-badge.M{background:#f59e0b;background:var(--mountain);color:#0d0d14}.assign-badge.V{background:#38bdf8;background:var(--valley);color:#0d0d14}.assign-badge.B{background:#3a3a5a;background:var(--border-bright)}.assign-badge.B,.step-reward-delta{color:#64748b;color:var(--text-dim)}.step-reward-delta{font-size:11px;padding-left:32px}.step-reward-delta .delta-positive{color:#22c55e;color:var(--progress)}.step-reward-delta .delta-negative{color:#ef4444}.reward-panel{border-bottom:1px solid #2a2a3a;border-bottom:1px solid var(--border);flex-shrink:0;padding:12px 
16px}.reward-row{align-items:center;display:flex;gap:8px;margin-bottom:6px}.reward-row:last-child{margin-bottom:0}.reward-label{color:#64748b;color:var(--text-dim);flex-shrink:0;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:10px;font-weight:500;letter-spacing:.06em;text-transform:uppercase;width:72px}.reward-track{background:#0d0d14;background:var(--bg);border:1px solid #2a2a3a;border:1px solid var(--border);flex:1 1;height:8px;overflow:hidden}.reward-bar{height:100%;transition:width .4s ease}.reward-value{color:#f8fafc;color:var(--text-primary);flex-shrink:0;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:11px;font-weight:500;text-align:right;width:36px}.reward-value.dim{color:#64748b;color:var(--text-dim)}.reward-divider{background:#2a2a3a;background:var(--border);height:1px;margin:6px 0}.info-badges{display:flex;flex-direction:column;gap:8px;padding:12px 16px}.info-row{align-items:center;display:flex;gap:8px;justify-content:space-between}.info-key{color:#64748b;color:var(--text-dim);font-size:10px;font-weight:500;letter-spacing:.06em;text-transform:uppercase}.info-key,.info-val{font-family:JetBrains Mono,monospace;font-family:var(--font-display)}.info-val{color:#f8fafc;color:var(--text-primary);font-size:11px;font-weight:700}.info-val.bool-true{color:#22c55e;color:var(--progress)}.info-val.bool-false{color:#ef4444}.info-val.dim{color:#64748b;color:var(--text-dim)}.target-selector{align-items:center;display:flex;gap:8px}.target-selector-label{color:#64748b;color:var(--text-dim);font-size:10px;font-weight:500;letter-spacing:.1em;text-transform:uppercase;white-space:nowrap}.target-select,.target-selector-label{font-family:JetBrains Mono,monospace;font-family:var(--font-display)}.target-select{background:#1a1a2e;background:var(--surface-2);border:1px solid #3a3a5a;border:1px solid 
var(--border-bright);color:#f8fafc;color:var(--text-primary);cursor:pointer;font-size:11px;min-width:180px;outline:none;padding:4px 8px}.target-select:focus{border-color:#38bdf8;border-color:var(--valley)}optgroup{background:#13131d;background:var(--surface);color:#64748b;color:var(--text-dim);font-size:10px}optgroup,option{font-family:JetBrains Mono,monospace;font-family:var(--font-display)}option{background:#1a1a2e;background:var(--surface-2);color:#f8fafc;color:var(--text-primary)}.player-controls{align-items:center;display:flex;flex-shrink:0;gap:6px}.ctrl-btn{background:#1a1a2e;background:var(--surface-2);border:1px solid #3a3a5a;border:1px solid var(--border-bright);color:#f8fafc;color:var(--text-primary);cursor:pointer;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:11px;font-weight:500;letter-spacing:.04em;line-height:1.4;padding:4px 10px;transition:background .1s,border-color .1s;white-space:nowrap}.ctrl-btn:hover:not(:disabled){background:#13131d;background:var(--surface);border-color:#64748b;border-color:var(--text-dim)}.ctrl-btn:disabled{cursor:not-allowed;opacity:.35}.ctrl-btn.play{border-color:#38bdf8;border-color:var(--valley);color:#38bdf8;color:var(--valley)}.ctrl-btn.play:hover:not(:disabled){background:#38bdf81a}.ctrl-step-display{border:1px solid #2a2a3a;border:1px solid var(--border);color:#64748b;color:var(--text-dim);font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:11px;min-width:72px;padding:4px 8px;text-align:center;white-space:nowrap}.app-overlay,.ctrl-step-display{background:#0d0d14;background:var(--bg)}.app-overlay{inset:0;justify-content:center;position:fixed;z-index:100}.app-overlay,.overlay-message{align-items:center;display:flex}.overlay-message{color:#64748b;color:var(--text-dim);font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:13px;gap:12px;letter-spacing:.1em}.pulse-dot{animation:pulse 1.2s ease-in-out 
infinite;background:#38bdf8;background:var(--valley);border-radius:50%;height:8px;width:8px}@keyframes pulse{0%,to{opacity:.2;transform:scale(.8)}50%{opacity:1;transform:scale(1)}}.episode-loading{align-items:center;color:#64748b;color:var(--text-dim);display:flex;font-family:JetBrains Mono,monospace;font-family:var(--font-display);font-size:11px;gap:8px;justify-content:center;letter-spacing:.08em;padding:12px 16px}
|
| 2 |
+
/*# sourceMappingURL=main.edb517bf.css.map*/
|
build/static/css/main.edb517bf.css.map
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"version":3,"file":"static/css/main.edb517bf.css","mappings":"6IAEA,iBACE,qBAAsB,CACtB,QAAS,CACT,SACF,CAEA,KAME,kCAAmC,CALnC,kBAAmB,CACnB,aAAc,CACd,mCAAuC,CACvC,cAAe,CACf,eAAgB,CAEhB,iBACF,CAEA,oBAEE,UAAW,CADX,SAEF,CAEA,0BACE,kBACF,CAEA,0BACE,kBACF,CAEA,gCACE,kBACF,CCjCA,MACE,YAAa,CACb,iBAAkB,CAClB,mBAAoB,CACpB,qBAAsB,CACtB,oBAAqB,CACrB,kBAAmB,CACnB,gBAAiB,CACjB,wBAAwC,CACxC,+BAA+C,CAC/C,kBAAmB,CACnB,kBAAmB,CACnB,iBAAkB,CAClB,sBAAuB,CACvB,kBAAmB,CACnB,gBAAiB,CACjB,uBAAwB,CACxB,yCAA2C,CAC3C,qCACF,CAEA,KAIE,kBAAqB,CAArB,oBAAqB,CAHrB,YAAa,CACb,qBAAsB,CACtB,YAAa,CAEb,eACF,CAGA,YAEE,kBAAmB,CAKnB,kBAA0B,CAA1B,yBAA0B,CAD1B,+BAAsC,CAAtC,qCAAsC,CALtC,YAAa,CAOb,aAAc,CALd,QAAS,CAET,WAAY,CADZ,cAAe,CAKf,UACF,CAEA,WAKE,aAA0B,CAA1B,yBAA0B,CAJ1B,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAChB,oBAAsB,CAEtB,kBACF,CAEA,yBACE,aAAsB,CAAtB,qBACF,CAEA,YAGE,kBAAyB,CAAzB,wBAAyB,CACzB,aAAc,CAFd,WAAY,CADZ,SAIF,CAEA,cAGE,QAAS,CACT,gBACF,CAEA,0BALE,kBAAmB,CADnB,YAaF,CAPA,YAEE,oCAAgC,CAAhC,+BAAgC,CADhC,cAAe,CAKf,OAAQ,CAHR,oBAIF,CAEA,gBAIE,kBAA2B,CAA3B,0BAA2B,CAD3B,iBAAkB,CADlB,UAAW,CADX,SAIF,CAEA,mBACE,kBAA2B,CAA3B,0BAA2B,CAC3B,0BAAmC,CAAnC,kCACF,CAEA,oBACE,kBAAmB,CACnB,0BACF,CAGA,UACE,YAAa,CAEb,QAAO,CADP,+BAAgC,CAEhC,eACF,CAEA,UAIE,8BAAqC,CAArC,oCACF,CAEA,qBANE,YAAa,CACb,qBAAsB,CACtB,eASF,CALA,WAIE,kBAA0B,CAA1B,yBACF,CAGA,YAKE,+BAAsC,CAAtC,qCAAsC,CAJtC,YAAa,CAGb,aAAc,CAFd,KAAM,CAIN,eAAgB,CAHhB,YAIF,CAEA,aACE,YAAa,CAGb,QAAO,CAFP,qBAAsB,CACtB,OAAQ,CAER,eACF,CAEA,0BACE,gBACF,CAEA,cAKE,aAAsB,CAAtB,qBAAsB,CAJtB,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAChB,oBAAsB,CAEtB,wBACF,CAEA,YAEE,kBAA8B,CAA9B,6BAA8B,CAD9B,aAEF,CAEA,WAEE,kDAA6D,CAC7D,wBAA+B,CAA/B,8BAA+B,CAF/B,aAGF,CAEA,kBAEE,kBAAmB,CADnB,YAAa,CAGb,QAAS,CADT,6BAEF,CAEA,kBAGE,kBAA0B,CAA1B,yBAA0B,CAD1B,wBAA+B,CAA/B,8BAA+B,CAD/B,mBAGF,CAEA,eAEE,gBAAuB,CADvB,WAAY,CAEZ,aAAsB,CAAtB,qBAAsB,CAKtB,cAAe,CAJf,oCAAgC,CAAhC,+BAAgC,CAChC,aAAc,CACd,oBAAsB,CACtB,eAEF,CAEA,8BACE,6BAAoC,CAApC,mCACF,CAEA,sBAEE,kBAAmB,CADnB,aAA0B,CAA1B,yBAEF,CAGA,mBAEE,YAAa,CADb,
QAAO,CAEP,qBAAsB,CACtB,eACF,CAEA,gBAQE,+BAAsC,CAAtC,qCAAsC,CAHtC,aAAsB,CAAtB,qBAAsB,CAItB,aAAc,CARd,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAChB,oBAAsB,CAGtB,gBAAiB,CADjB,wBAIF,CAEA,WAEE,QAAO,CADP,eAAgB,CAEhB,aACF,CAEA,YAKE,+BAAsC,CAAtC,qCAAsC,CACtC,cAAe,CALf,YAAa,CACb,qBAAsB,CACtB,OAAQ,CACR,gBAAiB,CAGjB,yBACF,CAEA,kBACE,kBAA0B,CAA1B,yBACF,CAEA,mBACE,kBAA4B,CAA5B,2BAA4B,CAC5B,6BAAoC,CAApC,mCAAoC,CACpC,iBACF,CAEA,gBAEE,kBAAmB,CADnB,YAAa,CAEb,OACF,CAEA,UAIE,aAAsB,CAAtB,qBAAsB,CAEtB,aAAc,CALd,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAEhB,UAEF,CAEA,kBAEE,aAA0B,CAA1B,yBAA0B,CAC1B,QAAO,CAFP,cAGF,CAEA,cAME,aAAc,CALd,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAEhB,eAAgB,CADhB,eAGF,CAEA,gBACE,kBAA2B,CAA3B,0BAA2B,CAC3B,aACF,CAEA,gBACE,kBAAyB,CAAzB,wBAAyB,CACzB,aACF,CAEA,gBACE,kBAAgC,CAAhC,+BAEF,CAEA,mCAHE,aAAsB,CAAtB,qBAOF,CAJA,mBACE,cAAe,CAEf,iBACF,CAEA,mCACE,aAAsB,CAAtB,qBACF,CAEA,mCACE,aACF,CAGA,cAEE,+BAAsC,CAAtC,qCAAsC,CACtC,aAAc,CAFd,iBAGF,CAEA,YAEE,kBAAmB,CADnB,YAAa,CAEb,OAAQ,CACR,iBACF,CAEA,uBACE,eACF,CAEA,cAKE,aAAsB,CAAtB,qBAAsB,CAEtB,aAAc,CANd,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAChB,oBAAsB,CAItB,wBAAyB,CAFzB,UAGF,CAEA,cAGE,kBAAqB,CAArB,oBAAqB,CACrB,wBAA+B,CAA/B,8BAA+B,CAH/B,QAAO,CACP,UAAW,CAGX,eACF,CAEA,YACE,WAAY,CACZ,yBACF,CAEA,cAIE,aAA0B,CAA1B,yBAA0B,CAG1B,aAAc,CANd,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAGhB,gBAAiB,CADjB,UAGF,CAEA,kBACE,aAAsB,CAAtB,qBACF,CAEA,gBAEE,kBAAyB,CAAzB,wBAAyB,CADzB,UAAW,CAEX,YACF,CAGA,aAEE,YAAa,CACb,qBAAsB,CACtB,OAAQ,CAHR,iBAIF,CAEA,UAEE,kBAAmB,CADnB,YAAa,CAGb,OAAQ,CADR,6BAEF,CAEA,UAKE,aAAsB,CAAtB,qBAAsB,CAHtB,cAAe,CACf,eAAgB,CAChB,oBAAsB,CAEtB,wBACF,CAEA,oBARE,oCAAgC,CAAhC,+BAaF,CALA,UAIE,aAA0B,CAA1B,yBAA0B,CAF1B,cAAe,CACf,eAEF,CAEA,oBACE,aAAsB,CAAtB,qBACF,CAEA,qBACE,aACF,CAEA,cACE,aAAsB,CAAtB,qBACF,CAGA,iBAEE,kBAAmB,CADnB,YAAa,CAEb,OACF,CAEA,uBAKE,aAAsB,CAAtB,qBAAsB,CAHtB,cAAe,CACf,eAAgB,CAChB,mBAAsB,CAEtB,wBAAyB,CACzB,kBACF,CAEA,sCATE,oCAAgC,CAAhC,+BAmBF,CAVA,eACE,kBAA4B,CAA5B,2BAA4B,CAC5B,wBAAsC,CAAtC,qCAAsC,CACtC,aAA
0B,CAA1B,yBAA0B,CAK1B,cAAe,CAHf,cAAe,CAIf,eAAgB,CAFhB,YAAa,CADb,eAIF,CAEA,qBACE,oBAA2B,CAA3B,0BACF,CAEA,SACE,kBAA0B,CAA1B,yBAA0B,CAC1B,aAAsB,CAAtB,qBAAsB,CAEtB,cACF,CAEA,gBAJE,oCAAgC,CAAhC,+BAQF,CAJA,OACE,kBAA4B,CAA5B,2BAA4B,CAC5B,aAA0B,CAA1B,yBAEF,CAGA,iBAEE,kBAAmB,CADnB,YAAa,CAGb,aAAc,CADd,OAEF,CAEA,UACE,kBAA4B,CAA5B,2BAA4B,CAC5B,wBAAsC,CAAtC,qCAAsC,CACtC,aAA0B,CAA1B,yBAA0B,CAK1B,cAAe,CAJf,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CACf,eAAgB,CAKhB,oBAAsB,CADtB,eAAgB,CAHhB,gBAAiB,CAKjB,0CAA8C,CAH9C,kBAIF,CAEA,+BACE,kBAA0B,CAA1B,yBAA0B,CAC1B,oBAA6B,CAA7B,4BACF,CAEA,mBAEE,kBAAmB,CADnB,WAEF,CAEA,eACE,oBAA2B,CAA3B,0BAA2B,CAC3B,aAAoB,CAApB,mBACF,CAEA,oCACE,oBACF,CAEA,mBAKE,wBAA+B,CAA/B,8BAA+B,CAF/B,aAAsB,CAAtB,qBAAsB,CAFtB,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CAMf,cAAe,CAJf,eAAgB,CAKhB,iBAAkB,CAFlB,kBAGF,CAGA,gCAPE,kBAAqB,CAArB,oBAeF,CARA,aAEE,OAAQ,CAGR,sBAAuB,CAJvB,cAAe,CAMf,WACF,CAEA,8BANE,kBAAmB,CADnB,YAeF,CARA,iBAIE,aAAsB,CAAtB,qBAAsB,CAHtB,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CAKf,QAAS,CAJT,mBAKF,CAEA,WAKE,yCAA0C,CAD1C,kBAAyB,CAAzB,wBAAyB,CADzB,iBAAkB,CADlB,UAAW,CADX,SAKF,CAEA,iBACE,MAAW,UAAY,CAAE,mBAAuB,CAChD,IAAM,SAAU,CAAE,kBAAqB,CACzC,CAGA,iBAEE,kBAAmB,CAMnB,aAAsB,CAAtB,qBAAsB,CAPtB,YAAa,CAKb,oCAAgC,CAAhC,+BAAgC,CAChC,cAAe,CAHf,OAAQ,CADR,sBAAuB,CAMvB,oBAAsB,CAJtB,iBAKF","sources":["index.css","App.css"],"sourcesContent":["@import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@300;400;500;700&family=IBM+Plex+Mono:wght@300;400;500&display=swap');\n\n*, *::before, *::after {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n}\n\nbody {\n background: #0d0d14;\n color: #f8fafc;\n font-family: 'IBM Plex Mono', monospace;\n font-size: 13px;\n line-height: 1.5;\n -webkit-font-smoothing: antialiased;\n overflow-x: hidden;\n}\n\n::-webkit-scrollbar {\n width: 4px;\n height: 4px;\n}\n\n::-webkit-scrollbar-track {\n background: #0d0d14;\n}\n\n::-webkit-scrollbar-thumb {\n background: #2a2a3a;\n}\n\n::-webkit-scrollbar-thumb:hover {\n background: #3a3a5a;\n}\n",":root {\n 
--bg: #0d0d14;\n --surface: #13131d;\n --surface-2: #1a1a2e;\n --paper-white: #fafaf5;\n --paper-edge: #2a2a3a;\n --mountain: #f59e0b;\n --valley: #38bdf8;\n --target-ghost: rgba(124, 58, 237, 0.20);\n --target-ghost-stroke: rgba(124, 58, 237, 0.45);\n --validity: #22d3ee;\n --progress: #22c55e;\n --economy: #a78bfa;\n --text-primary: #f8fafc;\n --text-dim: #64748b;\n --border: #2a2a3a;\n --border-bright: #3a3a5a;\n --font-display: 'JetBrains Mono', monospace;\n --font-mono: 'IBM Plex Mono', monospace;\n}\n\n.app {\n display: flex;\n flex-direction: column;\n height: 100vh;\n background: var(--bg);\n overflow: hidden;\n}\n\n/* βββ HEADER βββ */\n.app-header {\n display: flex;\n align-items: center;\n gap: 24px;\n padding: 0 20px;\n height: 48px;\n border-bottom: 1px solid var(--border);\n background: var(--surface);\n flex-shrink: 0;\n z-index: 10;\n}\n\n.app-title {\n font-family: var(--font-display);\n font-size: 14px;\n font-weight: 700;\n letter-spacing: 0.12em;\n color: var(--text-primary);\n white-space: nowrap;\n}\n\n.app-title .title-accent {\n color: var(--mountain);\n}\n\n.header-sep {\n width: 1px;\n height: 24px;\n background: var(--border);\n flex-shrink: 0;\n}\n\n.header-right {\n display: flex;\n align-items: center;\n gap: 16px;\n margin-left: auto;\n}\n\n.api-status {\n font-size: 11px;\n font-family: var(--font-display);\n letter-spacing: 0.08em;\n display: flex;\n align-items: center;\n gap: 6px;\n}\n\n.api-status-dot {\n width: 6px;\n height: 6px;\n border-radius: 50%;\n background: var(--text-dim);\n}\n\n.api-status-dot.ok {\n background: var(--progress);\n box-shadow: 0 0 6px var(--progress);\n}\n\n.api-status-dot.err {\n background: #ef4444;\n box-shadow: 0 0 6px #ef4444;\n}\n\n/* βββ MAIN LAYOUT βββ */\n.app-body {\n display: grid;\n grid-template-columns: 1fr 280px;\n flex: 1;\n overflow: hidden;\n}\n\n.app-left {\n display: flex;\n flex-direction: column;\n overflow: hidden;\n border-right: 1px solid var(--border);\n}\n\n.app-right {\n 
display: flex;\n flex-direction: column;\n overflow: hidden;\n background: var(--surface);\n}\n\n/* βββ CANVAS ROW βββ */\n.canvas-row {\n display: flex;\n gap: 0;\n padding: 16px;\n flex-shrink: 0;\n border-bottom: 1px solid var(--border);\n overflow-x: auto;\n}\n\n.canvas-wrap {\n display: flex;\n flex-direction: column;\n gap: 8px;\n flex: 1;\n min-width: 280px;\n}\n\n.canvas-wrap + .canvas-wrap {\n margin-left: 16px;\n}\n\n.canvas-label {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 500;\n letter-spacing: 0.14em;\n color: var(--text-dim);\n text-transform: uppercase;\n}\n\n.canvas-svg {\n display: block;\n background: var(--paper-white);\n}\n\n.canvas-3d {\n display: block;\n background: linear-gradient(180deg, #1a1a2e 0%, #0f101a 100%);\n border: 1px solid var(--border);\n}\n\n.canvas-label-row {\n display: flex;\n align-items: center;\n justify-content: space-between;\n gap: 10px;\n}\n\n.fold-mode-toggle {\n display: inline-flex;\n border: 1px solid var(--border);\n background: var(--surface);\n}\n\n.fold-mode-btn {\n border: none;\n background: transparent;\n color: var(--text-dim);\n font-family: var(--font-display);\n font-size: 9px;\n letter-spacing: 0.08em;\n padding: 3px 7px;\n cursor: pointer;\n}\n\n.fold-mode-btn + .fold-mode-btn {\n border-left: 1px solid var(--border);\n}\n\n.fold-mode-btn.active {\n color: var(--text-primary);\n background: #1f2538;\n}\n\n/* βββ STEP FEED βββ */\n.step-feed-section {\n flex: 1;\n display: flex;\n flex-direction: column;\n overflow: hidden;\n}\n\n.section-header {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 500;\n letter-spacing: 0.14em;\n color: var(--text-dim);\n text-transform: uppercase;\n padding: 8px 16px;\n border-bottom: 1px solid var(--border);\n flex-shrink: 0;\n}\n\n.step-feed {\n overflow-y: auto;\n flex: 1;\n padding: 4px 0;\n}\n\n.step-entry {\n display: flex;\n flex-direction: column;\n gap: 2px;\n padding: 8px 16px;\n border-bottom: 1px solid 
var(--border);\n cursor: default;\n transition: background 0.1s;\n}\n\n.step-entry:hover {\n background: var(--surface);\n}\n\n.step-entry.active {\n background: var(--surface-2);\n border-left: 2px solid var(--valley);\n padding-left: 14px;\n}\n\n.step-entry-top {\n display: flex;\n align-items: center;\n gap: 8px;\n}\n\n.step-num {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 700;\n color: var(--text-dim);\n width: 24px;\n flex-shrink: 0;\n}\n\n.step-instruction {\n font-size: 12px;\n color: var(--text-primary);\n flex: 1;\n}\n\n.assign-badge {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 700;\n padding: 1px 5px;\n line-height: 1.4;\n flex-shrink: 0;\n}\n\n.assign-badge.M {\n background: var(--mountain);\n color: #0d0d14;\n}\n\n.assign-badge.V {\n background: var(--valley);\n color: #0d0d14;\n}\n\n.assign-badge.B {\n background: var(--border-bright);\n color: var(--text-dim);\n}\n\n.step-reward-delta {\n font-size: 11px;\n color: var(--text-dim);\n padding-left: 32px;\n}\n\n.step-reward-delta .delta-positive {\n color: var(--progress);\n}\n\n.step-reward-delta .delta-negative {\n color: #ef4444;\n}\n\n/* βββ REWARD PANEL βββ */\n.reward-panel {\n padding: 12px 16px;\n border-bottom: 1px solid var(--border);\n flex-shrink: 0;\n}\n\n.reward-row {\n display: flex;\n align-items: center;\n gap: 8px;\n margin-bottom: 6px;\n}\n\n.reward-row:last-child {\n margin-bottom: 0;\n}\n\n.reward-label {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 500;\n letter-spacing: 0.06em;\n color: var(--text-dim);\n width: 72px;\n flex-shrink: 0;\n text-transform: uppercase;\n}\n\n.reward-track {\n flex: 1;\n height: 8px;\n background: var(--bg);\n border: 1px solid var(--border);\n overflow: hidden;\n}\n\n.reward-bar {\n height: 100%;\n transition: width 0.4s ease;\n}\n\n.reward-value {\n font-family: var(--font-display);\n font-size: 11px;\n font-weight: 500;\n color: var(--text-primary);\n width: 36px;\n 
text-align: right;\n flex-shrink: 0;\n}\n\n.reward-value.dim {\n color: var(--text-dim);\n}\n\n.reward-divider {\n height: 1px;\n background: var(--border);\n margin: 6px 0;\n}\n\n/* βββ INFO BADGES βββ */\n.info-badges {\n padding: 12px 16px;\n display: flex;\n flex-direction: column;\n gap: 8px;\n}\n\n.info-row {\n display: flex;\n align-items: center;\n justify-content: space-between;\n gap: 8px;\n}\n\n.info-key {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 500;\n letter-spacing: 0.06em;\n color: var(--text-dim);\n text-transform: uppercase;\n}\n\n.info-val {\n font-family: var(--font-display);\n font-size: 11px;\n font-weight: 700;\n color: var(--text-primary);\n}\n\n.info-val.bool-true {\n color: var(--progress);\n}\n\n.info-val.bool-false {\n color: #ef4444;\n}\n\n.info-val.dim {\n color: var(--text-dim);\n}\n\n/* βββ TARGET SELECTOR βββ */\n.target-selector {\n display: flex;\n align-items: center;\n gap: 8px;\n}\n\n.target-selector-label {\n font-family: var(--font-display);\n font-size: 10px;\n font-weight: 500;\n letter-spacing: 0.10em;\n color: var(--text-dim);\n text-transform: uppercase;\n white-space: nowrap;\n}\n\n.target-select {\n background: var(--surface-2);\n border: 1px solid var(--border-bright);\n color: var(--text-primary);\n font-family: var(--font-display);\n font-size: 11px;\n padding: 4px 8px;\n outline: none;\n cursor: pointer;\n min-width: 180px;\n}\n\n.target-select:focus {\n border-color: var(--valley);\n}\n\noptgroup {\n background: var(--surface);\n color: var(--text-dim);\n font-family: var(--font-display);\n font-size: 10px;\n}\n\noption {\n background: var(--surface-2);\n color: var(--text-primary);\n font-family: var(--font-display);\n}\n\n/* βββ PLAYER CONTROLS βββ */\n.player-controls {\n display: flex;\n align-items: center;\n gap: 6px;\n flex-shrink: 0;\n}\n\n.ctrl-btn {\n background: var(--surface-2);\n border: 1px solid var(--border-bright);\n color: var(--text-primary);\n font-family: 
var(--font-display);\n font-size: 11px;\n font-weight: 500;\n padding: 4px 10px;\n cursor: pointer;\n white-space: nowrap;\n line-height: 1.4;\n letter-spacing: 0.04em;\n transition: background 0.1s, border-color 0.1s;\n}\n\n.ctrl-btn:hover:not(:disabled) {\n background: var(--surface);\n border-color: var(--text-dim);\n}\n\n.ctrl-btn:disabled {\n opacity: 0.35;\n cursor: not-allowed;\n}\n\n.ctrl-btn.play {\n border-color: var(--valley);\n color: var(--valley);\n}\n\n.ctrl-btn.play:hover:not(:disabled) {\n background: rgba(56, 189, 248, 0.1);\n}\n\n.ctrl-step-display {\n font-family: var(--font-display);\n font-size: 11px;\n color: var(--text-dim);\n padding: 4px 8px;\n border: 1px solid var(--border);\n background: var(--bg);\n white-space: nowrap;\n min-width: 72px;\n text-align: center;\n}\n\n/* βββ LOADING / ERROR βββ */\n.app-overlay {\n position: fixed;\n inset: 0;\n display: flex;\n align-items: center;\n justify-content: center;\n background: var(--bg);\n z-index: 100;\n}\n\n.overlay-message {\n font-family: var(--font-display);\n font-size: 13px;\n letter-spacing: 0.1em;\n color: var(--text-dim);\n display: flex;\n align-items: center;\n gap: 12px;\n}\n\n.pulse-dot {\n width: 8px;\n height: 8px;\n border-radius: 50%;\n background: var(--valley);\n animation: pulse 1.2s ease-in-out infinite;\n}\n\n@keyframes pulse {\n 0%, 100% { opacity: 0.2; transform: scale(0.8); }\n 50% { opacity: 1; transform: scale(1); }\n}\n\n/* βββ MISC βββ */\n.episode-loading {\n display: flex;\n align-items: center;\n justify-content: center;\n gap: 8px;\n padding: 12px 16px;\n font-family: var(--font-display);\n font-size: 11px;\n color: var(--text-dim);\n letter-spacing: 0.08em;\n}\n"],"names":[],"sourceRoot":""}
|
build/static/js/main.7e6cf91b.js
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
build/static/js/main.7e6cf91b.js.LICENSE.txt
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license React
|
| 3 |
+
* react-dom-client.production.js
|
| 4 |
+
*
|
| 5 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 6 |
+
*
|
| 7 |
+
* This source code is licensed under the MIT license found in the
|
| 8 |
+
* LICENSE file in the root directory of this source tree.
|
| 9 |
+
*/
|
| 10 |
+
|
| 11 |
+
/**
|
| 12 |
+
* @license React
|
| 13 |
+
* react-dom.production.js
|
| 14 |
+
*
|
| 15 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 16 |
+
*
|
| 17 |
+
* This source code is licensed under the MIT license found in the
|
| 18 |
+
* LICENSE file in the root directory of this source tree.
|
| 19 |
+
*/
|
| 20 |
+
|
| 21 |
+
/**
|
| 22 |
+
* @license React
|
| 23 |
+
* react-jsx-runtime.production.js
|
| 24 |
+
*
|
| 25 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 26 |
+
*
|
| 27 |
+
* This source code is licensed under the MIT license found in the
|
| 28 |
+
* LICENSE file in the root directory of this source tree.
|
| 29 |
+
*/
|
| 30 |
+
|
| 31 |
+
/**
|
| 32 |
+
* @license React
|
| 33 |
+
* react.production.js
|
| 34 |
+
*
|
| 35 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 36 |
+
*
|
| 37 |
+
* This source code is licensed under the MIT license found in the
|
| 38 |
+
* LICENSE file in the root directory of this source tree.
|
| 39 |
+
*/
|
| 40 |
+
|
| 41 |
+
/**
|
| 42 |
+
* @license React
|
| 43 |
+
* scheduler.production.js
|
| 44 |
+
*
|
| 45 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 46 |
+
*
|
| 47 |
+
* This source code is licensed under the MIT license found in the
|
| 48 |
+
* LICENSE file in the root directory of this source tree.
|
| 49 |
+
*/
|
build/static/js/main.7e6cf91b.js.map
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
docs/optigami_handoff.md
ADDED
|
@@ -0,0 +1,767 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# OrigamiRL β OpenEnv Hackathon Handoff Document
|
| 2 |
+
|
| 3 |
+
## TL;DR
|
| 4 |
+
|
| 5 |
+
Build the **first multi-turn RL environment where an LLM learns to generate origami folding instructions**, verified by a computational origami simulator. Target the OpenEnv Hackathon (March 7-8, 2026, SF β $100K+ in prizes). Use OpenEnv spec + Unsloth GRPO for training. Dense verifiable rewards from origami geometry theorems (Kawasaki, Maekawa). No learned reward model needed.
|
| 6 |
+
|
| 7 |
+
---
|
| 8 |
+
|
| 9 |
+
## Hackathon Context
|
| 10 |
+
|
| 11 |
+
- **Event:** OpenEnv Hackathon SF, hosted by Cerebral Valley + Shack15 + Meta/PyTorch
|
| 12 |
+
- **Date:** March 7-8, 2026 (happening NOW)
|
| 13 |
+
- **Prize:** $100K+ cash
|
| 14 |
+
- **Teams:** Up to 4 people
|
| 15 |
+
- **Format:** Build RL environments, post-train a base model
|
| 16 |
+
|
| 17 |
+
### Judging Criteria
|
| 18 |
+
|
| 19 |
+
| Category | Weight | What Matters |
|
| 20 |
+
|----------|--------|-------------|
|
| 21 |
+
| Environment Innovation | 40% | Novel, creative, challenging. Does it meaningfully test agent behavior? |
|
| 22 |
+
| Storytelling | 30% | Clear problem explanation, engaging demo, easy to follow |
|
| 23 |
+
| Training Script Showing Improvement | 20% | Observable reward curves, before/after behavior |
|
| 24 |
+
| Reward and Training Pipeline Setup | 10% | Coherent reward logic, meaningful improvement in inference |
|
| 25 |
+
|
| 26 |
+
### Key Sponsors to Impress
|
| 27 |
+
|
| 28 |
+
- **Meta/PyTorch** β OpenEnv creators, want environments using their spec
|
| 29 |
+
- **Unsloth AI** β GRPO training infra, ART (Agent Reinforcement Trainer). USE THEIR TOOLS.
|
| 30 |
+
- **OpenPipe** β ART trainer (frontend/backend split for GRPO). Also use.
|
| 31 |
+
- **Patronus AI** β Building "generative simulators" (auto-scaling RL environments). They care about curriculum difficulty scaling and verifiable rewards.
|
| 32 |
+
- **Snorkel AI** β "2026 is the year of environments." They care about data quality and environment diversity.
|
| 33 |
+
- **Hugging Face** β OpenEnv Hub, want environments deployed there
|
| 34 |
+
- **Scale AI / Mercor** β Agent evaluation, structured task environments
|
| 35 |
+
|
| 36 |
+
---
|
| 37 |
+
|
| 38 |
+
## The Pitch (for judges)
|
| 39 |
+
|
| 40 |
+
> "Spatial reasoning is the next frontier for LLM training β NeurIPS 2025 papers like OrigamiSpace showed that even GPT-5 fails at multi-step origami reasoning. But those are benchmarks, not training environments. We built OrigamiRL: the first multi-turn RL environment where an LLM agent learns to fold paper by outputting instructions, receiving geometric feedback, and improving through GRPO. Our reward function is fully verifiable β fold validity is checked against computational origami axioms, not an LLM judge. We built it on OpenEnv + Unsloth with a natural curriculum from single folds to full cranes."
|
| 41 |
+
|
| 42 |
+
---
|
| 43 |
+
|
| 44 |
+
## Prior Work (What Exists, Where the Gaps Are)
|
| 45 |
+
|
| 46 |
+
### 1. OrigamiSpace (NeurIPS 2025 Spotlight)
|
| 47 |
+
|
| 48 |
+
- **Paper:** https://arxiv.org/abs/2511.18450
|
| 49 |
+
- **What it is:** Benchmark with 350 origami data instances (CP diagrams, folding processes, folded shapes). 4 evaluation tasks: Pattern Prediction, Multi-step Spatial Reasoning, Spatial Relationship Prediction, End-to-End CP Code Generation.
|
| 50 |
+
- **Their compiler:** Outputs detailed flattened diagrams with crease locations and stacking relationships, supports interactive simulation with MLLMs, provides comprehensive error feedback. Checks: syntax validity, geometric foldability, no self-intersections, Kawasaki's theorem, Maekawa's theorem.
|
| 51 |
+
- **Their reward metrics for code gen:** Hausdorff distance (shape similarity), dihedral angle distribution, bounding box aspect ratios, constraint satisfaction.
|
| 52 |
+
- **Difficulty levels:** Easy (3-9 steps), Medium (10-19 steps), Hard (20-30 steps)
|
| 53 |
+
- **Gap:** Single-turn only (LLM generates complete CP code in one shot). They mention RL exploration but it's not the focus. No multi-turn sequential folding.
|
| 54 |
+
|
| 55 |
+
### 2. GamiBench (Dec 2025)
|
| 56 |
+
|
| 57 |
+
- **Paper:** https://arxiv.org/abs/2512.22207
|
| 58 |
+
- **What it is:** 186 regular + 186 impossible 2D crease patterns with 3D folded shapes from 6 viewpoints. 3 VQA tasks.
|
| 59 |
+
- **Gap:** Evaluation-only, no training. Tests single-step spatial understanding.
|
| 60 |
+
|
| 61 |
+
### 3. SpatialThinker (NeurIPS 2025)
|
| 62 |
+
|
| 63 |
+
- **Paper:** https://arxiv.org/abs/2511.07403
|
| 64 |
+
- **What it is:** 3D-aware MLLM trained with RL using dense spatial rewards. Constructs scene graphs. Multi-objective reward with lexicographic gating.
|
| 65 |
+
- **Key architecture to steal:** Dense reward design with lexicographic ordering β format β count β accuracy β spatial. Nearly doubled RL training gains vs sparse rewards. Only needed 7K training samples with GRPO.
|
| 66 |
+
- **Gap:** Static scene understanding (objects on a table), not sequential physical transformations.
|
| 67 |
+
|
| 68 |
+
### 4. rigid-origami Gym (IJCAI 2023)
|
| 69 |
+
|
| 70 |
+
- **Repo:** https://github.com/belalugaX/rigid-origami
|
| 71 |
+
- **Paper:** "Automating Rigid Origami Design" (https://arxiv.org/abs/2211.13219)
|
| 72 |
+
- **What it is:** Gym environment where agent constructs crease pattern graphs on a board. Sparse rewards. Foldability validated by triangle intersection tests + kinematic rigidity model. Game terminates on non-foldable states.
|
| 73 |
+
- **Gap:** Classical RL agents (discrete grid actions), NOT LLMs generating text. Rigid-origami tessellations only, not traditional origami. No natural language.
|
| 74 |
+
|
| 75 |
+
### 5. The Unique Gap We Fill
|
| 76 |
+
|
| 77 |
+
Nobody has built a model that reasons about **sequential 2D-to-3D geometric transformations with physical constraints** through **natural language instructions** in a **multi-turn RL training loop**. Origami is uniquely hard because it requires tracking how a flat sheet's topology changes through a sequence of folds β mental rotation, spatial visualization, and perspective-taking all at once.
|
| 78 |
+
|
| 79 |
+
---
|
| 80 |
+
|
| 81 |
+
## Environment Design
|
| 82 |
+
|
| 83 |
+
### Architecture Overview
|
| 84 |
+
|
| 85 |
+
```
|
| 86 |
+
+---------------------------------------------------+
|
| 87 |
+
| OpenEnv Server |
|
| 88 |
+
| +-----------+ +----------+ +--------------+ |
|
| 89 |
+
| | State | | Action | | Reward | |
|
| 90 |
+
| | (FOLD JSON| | (LLM | | (Dense, | |
|
| 91 |
+
| | + target)| | output) | | verifiable) | |
|
| 92 |
+
| +-----------+ +----------+ +--------------+ |
|
| 93 |
+
| | | | |
|
| 94 |
+
| v v v |
|
| 95 |
+
| +-----------------------------------------------+|
|
| 96 |
+
| | Paper Geometry Engine (Python) ||
|
| 97 |
+
| | - Polygon state (Shapely) ||
|
| 98 |
+
| | - Fold operations (reflection across line) ||
|
| 99 |
+
| | - Kawasaki/Maekawa constraint checks ||
|
| 100 |
+
| | - Layer tracking ||
|
| 101 |
+
| | - FOLD format import/export ||
|
| 102 |
+
| +-----------------------------------------------+|
|
| 103 |
+
| | |
|
| 104 |
+
| v |
|
| 105 |
+
| +-----------------------------------------------+|
|
| 106 |
+
| | Three.js Visualizer (Demo only) ||
|
| 107 |
+
| | - 3D fold animation ||
|
| 108 |
+
| | - Strain heatmap ||
|
| 109 |
+
| | - Instruction stream ||
|
| 110 |
+
| +-----------------------------------------------+|
|
| 111 |
+
+---------------------------------------------------+
|
| 112 |
+
| ^
|
| 113 |
+
v |
|
| 114 |
+
+---------------------------------------------------+
|
| 115 |
+
| Unsloth ART / GRPO Trainer |
|
| 116 |
+
| - Qwen2.5-VL-7B or Qwen3-4B base model |
|
| 117 |
+
| - LoRA/QLoRA for efficient training |
|
| 118 |
+
| - Multi-turn rollouts |
|
| 119 |
+
+---------------------------------------------------+
|
| 120 |
+
```
|
| 121 |
+
|
| 122 |
+
### OpenEnv Spec Compliance
|
| 123 |
+
|
| 124 |
+
Must implement these APIs:
|
| 125 |
+
|
| 126 |
+
```python
|
| 127 |
+
class OrigamiEnv:
|
| 128 |
+
async def reset() -> Observation # New episode: flat paper + target
|
| 129 |
+
async def step(action) -> (Observation, reward, done, info)
|
| 130 |
+
async def state() -> State # Current paper geometry
|
| 131 |
+
async def close() # Cleanup
|
| 132 |
+
```
|
| 133 |
+
|
| 134 |
+
OpenEnv repo: https://github.com/meta-pytorch/OpenEnv
|
| 135 |
+
Install: `pip install -e .` then `openenv init origami_env`
|
| 136 |
+
|
| 137 |
+
### State Space
|
| 138 |
+
|
| 139 |
+
```python
|
| 140 |
+
@dataclass
|
| 141 |
+
class OrigamiState:
|
| 142 |
+
# Current paper geometry
|
| 143 |
+
vertices: List[Tuple[float, float]] # 2D vertex positions
|
| 144 |
+
edges: List[Tuple[int, int]] # Edge connectivity
|
| 145 |
+
edges_assignment: List[str] # 'M', 'V', 'B', 'F' (mountain/valley/boundary/flat)
|
| 146 |
+
edges_foldAngle: List[float] # -180 to 180 degrees
|
| 147 |
+
faces: List[List[int]] # Face vertex indices
|
| 148 |
+
layer_order: List[List[int]] # Face stacking order
|
| 149 |
+
|
| 150 |
+
# Episode context
|
| 151 |
+
target_crease_pattern: dict # Target FOLD JSON
|
| 152 |
+
target_shape_image: Optional[np.ndarray] # Target folded shape (for multimodal)
|
| 153 |
+
instruction_history: List[str] # Previous instructions
|
| 154 |
+
step_count: int
|
| 155 |
+
max_steps: int
|
| 156 |
+
```
|
| 157 |
+
|
| 158 |
+
This maps directly to the **FOLD format** (JSON-based, used by all origami software):
|
| 159 |
+
|
| 160 |
+
```json
|
| 161 |
+
{
|
| 162 |
+
"vertices_coords": [[0,0], [1,0], [1,1], [0,1]],
|
| 163 |
+
"edges_vertices": [[0,1], [1,2], [2,3], [3,0]],
|
| 164 |
+
"edges_assignment": ["B", "B", "B", "B"],
|
| 165 |
+
"edges_foldAngle": [0, 0, 0, 0],
|
| 166 |
+
"faces_vertices": [[0, 1, 2, 3]]
|
| 167 |
+
}
|
| 168 |
+
```
|
| 169 |
+
|
| 170 |
+
FOLD spec: https://github.com/edemaine/fold
|
| 171 |
+
FOLD JS library: https://edemaine.github.io/fold/
|
| 172 |
+
|
| 173 |
+
### Action Space
|
| 174 |
+
|
| 175 |
+
The LLM outputs a JSON action:
|
| 176 |
+
|
| 177 |
+
```json
|
| 178 |
+
{
|
| 179 |
+
"instruction": "Fold the top edge down to meet the bottom edge",
|
| 180 |
+
"fold_line": [[0, 0.5], [1, 0.5]],
|
| 181 |
+
"fold_angle": -180,
|
| 182 |
+
"assignment": "V"
|
| 183 |
+
}
|
| 184 |
+
```
|
| 185 |
+
|
| 186 |
+
The `instruction` field is natural language (what we're training the model to produce well). The geometric fields are the verifiable representation. During training, the model outputs both; for the final demo, the NL instruction is the star.
|
| 187 |
+
|
| 188 |
+
Alternative simpler action (for early iterations):
|
| 189 |
+
|
| 190 |
+
```json
|
| 191 |
+
{
|
| 192 |
+
"instruction": "Valley fold along the horizontal center line",
|
| 193 |
+
"fold_type": "valley",
|
| 194 |
+
"fold_axis": "horizontal",
|
| 195 |
+
"fold_position": 0.5
|
| 196 |
+
}
|
| 197 |
+
```
|
| 198 |
+
|
| 199 |
+
### Reward Function β Dense, Multi-Objective, Lexicographically Gated
|
| 200 |
+
|
| 201 |
+
Inspired by SpatialThinker's design. Rewards are computed in order; later rewards only apply if earlier gates pass.
|
| 202 |
+
|
| 203 |
+
```python
|
| 204 |
+
def compute_reward(state, action, new_state, target) -> dict:
|
| 205 |
+
rewards = {}
|
| 206 |
+
|
| 207 |
+
# LEVEL 1: Format (gate for everything else)
|
| 208 |
+
# Does the output parse into a valid fold operation?
|
| 209 |
+
rewards['format'] = 1.0 if parseable(action) else 0.0
|
| 210 |
+
if rewards['format'] == 0:
|
| 211 |
+
return rewards # Stop here
|
| 212 |
+
|
| 213 |
+
# LEVEL 2: Local Geometric Validity
|
| 214 |
+
# Kawasaki's theorem: sector angles at each interior vertex sum to 2pi
|
| 215 |
+
kawasaki_valid = check_kawasaki(new_state)
|
| 216 |
+
# Maekawa's theorem: |M - V| = 2 at each interior vertex
|
| 217 |
+
maekawa_valid = check_maekawa(new_state)
|
| 218 |
+
# No self-intersection
|
| 219 |
+
no_intersection = check_no_self_intersection(new_state)
|
| 220 |
+
rewards['validity'] = (kawasaki_valid + maekawa_valid + no_intersection) / 3.0
|
| 221 |
+
if rewards['validity'] < 0.5:
|
| 222 |
+
return rewards # Stop here
|
| 223 |
+
|
| 224 |
+
# LEVEL 3: Physical Feasibility
|
| 225 |
+
# Can this fold actually be performed given layer stack?
|
| 226 |
+
layer_consistent = check_layer_ordering(new_state)
|
| 227 |
+
fold_achievable = check_fold_angle_feasible(new_state)
|
| 228 |
+
rewards['feasibility'] = (layer_consistent + fold_achievable) / 2.0
|
| 229 |
+
|
| 230 |
+
# LEVEL 4: Progress Toward Target (Dense)
|
| 231 |
+
# Crease pattern graph similarity
|
| 232 |
+
cp_similarity = crease_pattern_similarity(new_state, target)
|
| 233 |
+
# Fold angle distribution match
|
| 234 |
+
angle_similarity = fold_angle_distribution_match(new_state, target)
|
| 235 |
+
# Bounding box aspect ratio match
|
| 236 |
+
bbox_similarity = bounding_box_similarity(new_state, target)
|
| 237 |
+
rewards['progress'] = 0.4 * cp_similarity + 0.4 * angle_similarity + 0.2 * bbox_similarity
|
| 238 |
+
|
| 239 |
+
# LEVEL 5: Completion Bonus
|
| 240 |
+
if shape_matches_target(new_state, target, tolerance=0.05):
|
| 241 |
+
rewards['completion'] = 10.0
|
| 242 |
+
|
| 243 |
+
# LEVEL 6: Efficiency
|
| 244 |
+
rewards['efficiency'] = -0.01 # Small step penalty to encourage fewer folds
|
| 245 |
+
|
| 246 |
+
# Total
|
| 247 |
+
rewards['total'] = (
|
| 248 |
+
0.1 * rewards['format'] +
|
| 249 |
+
0.2 * rewards['validity'] +
|
| 250 |
+
0.1 * rewards['feasibility'] +
|
| 251 |
+
0.5 * rewards['progress'] +
|
| 252 |
+
rewards.get('completion', 0) +
|
| 253 |
+
rewards['efficiency']
|
| 254 |
+
)
|
| 255 |
+
return rewards
|
| 256 |
+
```
|
| 257 |
+
|
| 258 |
+
### Key Origami Theorems for Verification
|
| 259 |
+
|
| 260 |
+
These are the verifiable constraints β the "unit tests" of origami:
|
| 261 |
+
|
| 262 |
+
1. **Kawasaki's Theorem:** At any interior vertex of a flat-foldable crease pattern, the alternating sum of sector angles equals zero (equivalently, they sum to 2pi on each side). NECESSARY condition for flat-foldability.
|
| 263 |
+
|
| 264 |
+
2. **Maekawa's Theorem:** At any interior vertex, the number of mountain folds minus valley folds equals +/-2. |M - V| = 2.
|
| 265 |
+
|
| 266 |
+
3. **No self-intersection:** Faces cannot penetrate each other during folding.
|
| 267 |
+
|
| 268 |
+
4. **Euler's formula for planar graphs:** V - E + F = 2 (sanity check on graph structure).
|
| 269 |
+
|
| 270 |
+
5. **Huzita-Hatori axioms:** The 7 axioms defining all possible single-fold operations (point-to-point, point-to-line, line-to-line, etc.). These define the VALID action space.
|
| 271 |
+
|
| 272 |
+
### Curriculum Design
|
| 273 |
+
|
| 274 |
+
| Level | Folds | Examples | Complexity |
|
| 275 |
+
|-------|-------|----------|-----------|
|
| 276 |
+
| 1 | 1 | Valley fold in half, mountain fold corner | Single fold validity |
|
| 277 |
+
| 2 | 2-3 | Paper airplane nose, triangle fold | Sequential dependency |
|
| 278 |
+
| 3 | 4-6 | Simple boat, fortune teller | Multi-step with symmetry |
|
| 279 |
+
| 4 | 7-12 | Paper airplane (full), jumping frog | Longer horizon planning |
|
| 280 |
+
| 5 | 13-20 | Crane, lily | Complex spatial tracking |
|
| 281 |
+
|
| 282 |
+
For the hackathon, focus on Levels 1-3. Even showing reward improvement on Level 1-2 is a strong result.
|
| 283 |
+
|
| 284 |
+
---
|
| 285 |
+
|
| 286 |
+
## Core Implementation: Python Geometry Engine
|
| 287 |
+
|
| 288 |
+
This is the MOST IMPORTANT piece. Pure Python, no JS dependencies.
|
| 289 |
+
|
| 290 |
+
```python
|
| 291 |
+
import numpy as np
|
| 292 |
+
from shapely.geometry import Polygon, LineString, MultiPolygon
|
| 293 |
+
from shapely.ops import split
|
| 294 |
+
from typing import List, Tuple, Dict
|
| 295 |
+
import json
|
| 296 |
+
|
| 297 |
+
class PaperState:
|
| 298 |
+
"""Represents the current state of the origami paper."""
|
| 299 |
+
|
| 300 |
+
def __init__(self, size: float = 1.0):
|
| 301 |
+
# Start with a unit square
|
| 302 |
+
self.regions = [Polygon([(0,0), (size,0), (size,size), (0,size)])]
|
| 303 |
+
self.fold_history = []
|
| 304 |
+
self.crease_lines = []
|
| 305 |
+
self.crease_assignments = [] # 'M' or 'V'
|
| 306 |
+
self.crease_angles = []
|
| 307 |
+
self.layer_order = [0] # Stack order of regions
|
| 308 |
+
|
| 309 |
+
def apply_fold(self, fold_line: LineString, angle: float, assignment: str) -> dict:
|
| 310 |
+
"""
|
| 311 |
+
Apply a fold operation. Returns dict with validity info.
|
| 312 |
+
fold_line: Shapely LineString defining the fold axis
|
| 313 |
+
angle: fold angle in degrees (-180 to 180)
|
| 314 |
+
assignment: 'M' (mountain) or 'V' (valley)
|
| 315 |
+
"""
|
| 316 |
+
result = {'valid': True, 'errors': []}
|
| 317 |
+
|
| 318 |
+
# 1. Split regions by fold line
|
| 319 |
+
new_regions = []
|
| 320 |
+
for region in self.regions:
|
| 321 |
+
if fold_line.intersects(region):
|
| 322 |
+
parts = split(region, fold_line)
|
| 323 |
+
new_regions.extend(parts.geoms)
|
| 324 |
+
else:
|
| 325 |
+
new_regions.append(region)
|
| 326 |
+
|
| 327 |
+
# 2. Determine which side folds (based on assignment)
|
| 328 |
+
folding_side = []
|
| 329 |
+
staying_side = []
|
| 330 |
+
for region in new_regions:
|
| 331 |
+
centroid = region.centroid
|
| 332 |
+
side = self._point_side(centroid, fold_line)
|
| 333 |
+
if side > 0:
|
| 334 |
+
folding_side.append(region)
|
| 335 |
+
else:
|
| 336 |
+
staying_side.append(region)
|
| 337 |
+
|
| 338 |
+
# 3. Reflect folding regions across fold line
|
| 339 |
+
reflected = [self._reflect_polygon(r, fold_line) for r in folding_side]
|
| 340 |
+
|
| 341 |
+
# 4. Update state
|
| 342 |
+
self.regions = staying_side + reflected
|
| 343 |
+
self.crease_lines.append(fold_line)
|
| 344 |
+
self.crease_assignments.append(assignment)
|
| 345 |
+
self.crease_angles.append(angle)
|
| 346 |
+
self.fold_history.append({
|
| 347 |
+
'line': list(fold_line.coords),
|
| 348 |
+
'angle': angle,
|
| 349 |
+
'assignment': assignment
|
| 350 |
+
})
|
| 351 |
+
|
| 352 |
+
# 5. Update layer order
|
| 353 |
+
self._update_layer_order(staying_side, reflected)
|
| 354 |
+
|
| 355 |
+
return result
|
| 356 |
+
|
| 357 |
+
def _reflect_polygon(self, poly: Polygon, line: LineString) -> Polygon:
|
| 358 |
+
"""Reflect a polygon across a line."""
|
| 359 |
+
coords = list(poly.exterior.coords)
|
| 360 |
+
reflected_coords = [self._reflect_point(p, line) for p in coords]
|
| 361 |
+
return Polygon(reflected_coords)
|
| 362 |
+
|
| 363 |
+
def _reflect_point(self, point: tuple, line: LineString) -> tuple:
|
| 364 |
+
"""Reflect a point across a line."""
|
| 365 |
+
p = np.array(point[:2])
|
| 366 |
+
l1 = np.array(line.coords[0])
|
| 367 |
+
l2 = np.array(line.coords[1])
|
| 368 |
+
d = l2 - l1
|
| 369 |
+
d = d / np.linalg.norm(d)
|
| 370 |
+
# Reflection formula: p' = p - 2(p-l1).n * n where n is normal to line
|
| 371 |
+
n = np.array([-d[1], d[0]])
|
| 372 |
+
v = p - l1
|
| 373 |
+
return tuple(p - 2 * np.dot(v, n) * n)
|
| 374 |
+
|
| 375 |
+
def _point_side(self, point, line: LineString) -> float:
|
| 376 |
+
"""Returns positive if point is on left side of line, negative if right."""
|
| 377 |
+
p = np.array([point.x, point.y])
|
| 378 |
+
l1 = np.array(line.coords[0])
|
| 379 |
+
l2 = np.array(line.coords[1])
|
| 380 |
+
return float(np.cross(l2 - l1, p - l1))
|
| 381 |
+
|
| 382 |
+
def _update_layer_order(self, staying, reflected):
|
| 383 |
+
"""Update the layer stacking order after a fold."""
|
| 384 |
+
self.layer_order = list(range(len(staying))) + \
|
| 385 |
+
list(range(len(staying), len(staying) + len(reflected)))
|
| 386 |
+
|
| 387 |
+
def to_fold_json(self) -> dict:
|
| 388 |
+
"""Export current state as FOLD format JSON."""
|
| 389 |
+
vertices = set()
|
| 390 |
+
for line in self.crease_lines:
|
| 391 |
+
for coord in line.coords:
|
| 392 |
+
vertices.add(tuple(round(c, 10) for c in coord))
|
| 393 |
+
# Add boundary vertices
|
| 394 |
+
for region in self.regions:
|
| 395 |
+
for coord in region.exterior.coords:
|
| 396 |
+
vertices.add(tuple(round(c, 10) for c in coord[:2]))
|
| 397 |
+
|
| 398 |
+
vertices = sorted(list(vertices))
|
| 399 |
+
vertex_map = {v: i for i, v in enumerate(vertices)}
|
| 400 |
+
|
| 401 |
+
edge_set = set()
|
| 402 |
+
edges_list = []
|
| 403 |
+
assignments_list = []
|
| 404 |
+
angles_list = []
|
| 405 |
+
|
| 406 |
+
# Add crease edges
|
| 407 |
+
for i, line in enumerate(self.crease_lines):
|
| 408 |
+
c = [tuple(round(x, 10) for x in coord) for coord in line.coords]
|
| 409 |
+
edge = tuple(sorted([vertex_map[c[0]], vertex_map[c[1]]]))
|
| 410 |
+
if edge not in edge_set:
|
| 411 |
+
edge_set.add(edge)
|
| 412 |
+
edges_list.append(list(edge))
|
| 413 |
+
assignments_list.append(self.crease_assignments[i])
|
| 414 |
+
angles_list.append(self.crease_angles[i])
|
| 415 |
+
|
| 416 |
+
return {
|
| 417 |
+
'vertices_coords': [list(v) for v in vertices],
|
| 418 |
+
'edges_vertices': edges_list,
|
| 419 |
+
'edges_assignment': assignments_list,
|
| 420 |
+
'edges_foldAngle': angles_list,
|
| 421 |
+
}
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
class OrigamiVerifier:
|
| 425 |
+
"""Verifiable reward functions based on origami theorems."""
|
| 426 |
+
|
| 427 |
+
@staticmethod
|
| 428 |
+
def check_kawasaki(state: PaperState) -> bool:
|
| 429 |
+
"""Kawasaki's theorem: alternating sum of angles at each interior vertex = 0."""
|
| 430 |
+
fold_json = state.to_fold_json()
|
| 431 |
+
vertices = fold_json['vertices_coords']
|
| 432 |
+
edges = fold_json['edges_vertices']
|
| 433 |
+
|
| 434 |
+
for v_idx in range(len(vertices)):
|
| 435 |
+
v = vertices[v_idx]
|
| 436 |
+
incident_edges = [e for e in edges if v_idx in e]
|
| 437 |
+
if len(incident_edges) < 4:
|
| 438 |
+
continue # Need degree-4+ for Kawasaki
|
| 439 |
+
|
| 440 |
+
# Calculate sector angles
|
| 441 |
+
angles = []
|
| 442 |
+
for e in incident_edges:
|
| 443 |
+
other = e[1] if e[0] == v_idx else e[0]
|
| 444 |
+
other_v = vertices[other]
|
| 445 |
+
angle = np.arctan2(other_v[1] - v[1], other_v[0] - v[0])
|
| 446 |
+
angles.append(angle)
|
| 447 |
+
|
| 448 |
+
angles.sort()
|
| 449 |
+
sector_angles = []
|
| 450 |
+
for i in range(len(angles) - 1):
|
| 451 |
+
sector_angles.append(angles[i+1] - angles[i])
|
| 452 |
+
sector_angles.append(2*np.pi - (angles[-1] - angles[0]))
|
| 453 |
+
|
| 454 |
+
# Kawasaki: alternating sum should be ~0
|
| 455 |
+
if len(sector_angles) >= 4:
|
| 456 |
+
alt_sum = sum(sector_angles[::2]) - sum(sector_angles[1::2])
|
| 457 |
+
if abs(alt_sum) > 0.01:
|
| 458 |
+
return False
|
| 459 |
+
return True
|
| 460 |
+
|
| 461 |
+
@staticmethod
|
| 462 |
+
def check_maekawa(state: PaperState) -> bool:
|
| 463 |
+
"""Maekawa's theorem: |M - V| = 2 at each interior vertex."""
|
| 464 |
+
fold_json = state.to_fold_json()
|
| 465 |
+
vertices = fold_json['vertices_coords']
|
| 466 |
+
edges = fold_json['edges_vertices']
|
| 467 |
+
assignments = fold_json['edges_assignment']
|
| 468 |
+
|
| 469 |
+
for v_idx in range(len(vertices)):
|
| 470 |
+
incident = [(i, e) for i, e in enumerate(edges) if v_idx in e]
|
| 471 |
+
m_count = sum(1 for i, _ in incident if i < len(assignments) and assignments[i] == 'M')
|
| 472 |
+
v_count = sum(1 for i, _ in incident if i < len(assignments) and assignments[i] == 'V')
|
| 473 |
+
|
| 474 |
+
if m_count + v_count >= 4: # Interior vertex with folds
|
| 475 |
+
if abs(m_count - v_count) != 2:
|
| 476 |
+
return False
|
| 477 |
+
return True
|
| 478 |
+
|
| 479 |
+
@staticmethod
|
| 480 |
+
def crease_pattern_similarity(state: PaperState, target_fold_json: dict) -> float:
|
| 481 |
+
"""Compare current crease pattern to target. Returns 0-1 similarity."""
|
| 482 |
+
current = state.to_fold_json()
|
| 483 |
+
|
| 484 |
+
n_current = len(current.get('edges_vertices', []))
|
| 485 |
+
n_target = len(target_fold_json.get('edges_vertices', []))
|
| 486 |
+
|
| 487 |
+
if n_target == 0:
|
| 488 |
+
return 1.0 if n_current == 0 else 0.0
|
| 489 |
+
|
| 490 |
+
edge_count_sim = 1.0 - abs(n_current - n_target) / max(n_target, 1)
|
| 491 |
+
edge_count_sim = max(0, edge_count_sim)
|
| 492 |
+
|
| 493 |
+
current_assignments = current.get('edges_assignment', [])
|
| 494 |
+
target_assignments = target_fold_json.get('edges_assignment', [])
|
| 495 |
+
|
| 496 |
+
c_m = current_assignments.count('M')
|
| 497 |
+
c_v = current_assignments.count('V')
|
| 498 |
+
t_m = target_assignments.count('M')
|
| 499 |
+
t_v = target_assignments.count('V')
|
| 500 |
+
|
| 501 |
+
total = max(t_m + t_v, 1)
|
| 502 |
+
assign_sim = 1.0 - (abs(c_m - t_m) + abs(c_v - t_v)) / (2 * total)
|
| 503 |
+
assign_sim = max(0, assign_sim)
|
| 504 |
+
|
| 505 |
+
return 0.5 * edge_count_sim + 0.5 * assign_sim
|
| 506 |
+
```
|
| 507 |
+
|
| 508 |
+
---
|
| 509 |
+
|
| 510 |
+
## OpenEnv Environment Wrapper
|
| 511 |
+
|
| 512 |
+
```python
|
| 513 |
+
# origami_env/server.py
|
| 514 |
+
from openenv.core import Environment
|
| 515 |
+
from paper_engine import PaperState, OrigamiVerifier
|
| 516 |
+
from shapely.geometry import LineString
|
| 517 |
+
import json
|
| 518 |
+
|
| 519 |
+
class OrigamiEnvironment(Environment):
|
| 520 |
+
|
| 521 |
+
def __init__(self, targets_dir="targets/", max_steps=20):
|
| 522 |
+
self.targets_dir = targets_dir
|
| 523 |
+
self.max_steps = max_steps
|
| 524 |
+
self.paper = None
|
| 525 |
+
self.target = None
|
| 526 |
+
self.step_count = 0
|
| 527 |
+
|
| 528 |
+
async def reset(self, target_id=None):
|
| 529 |
+
self.paper = PaperState(size=1.0)
|
| 530 |
+
self.target = self._load_target(target_id)
|
| 531 |
+
self.step_count = 0
|
| 532 |
+
return self._get_observation()
|
| 533 |
+
|
| 534 |
+
async def step(self, action):
|
| 535 |
+
self.step_count += 1
|
| 536 |
+
|
| 537 |
+
# Parse action
|
| 538 |
+
try:
|
| 539 |
+
fold_line = LineString(action['fold_line'])
|
| 540 |
+
angle = action['fold_angle']
|
| 541 |
+
assignment = action['assignment']
|
| 542 |
+
except (KeyError, Exception):
|
| 543 |
+
reward = {'format': 0, 'total': -0.1}
|
| 544 |
+
return self._get_observation(), reward, False, {'error': 'parse_failed'}
|
| 545 |
+
|
| 546 |
+
# Apply fold
|
| 547 |
+
result = self.paper.apply_fold(fold_line, angle, assignment)
|
| 548 |
+
|
| 549 |
+
# Compute rewards
|
| 550 |
+
reward = self._compute_reward(result)
|
| 551 |
+
|
| 552 |
+
# Check termination
|
| 553 |
+
done = (
|
| 554 |
+
self.step_count >= self.max_steps or
|
| 555 |
+
reward.get('completion', 0) > 0
|
| 556 |
+
)
|
| 557 |
+
|
| 558 |
+
return self._get_observation(), reward, done, {}
|
| 559 |
+
|
| 560 |
+
async def state(self):
|
| 561 |
+
return {
|
| 562 |
+
'paper': self.paper.to_fold_json(),
|
| 563 |
+
'target': self.target,
|
| 564 |
+
'step': self.step_count,
|
| 565 |
+
'fold_history': self.paper.fold_history
|
| 566 |
+
}
|
| 567 |
+
|
| 568 |
+
def _compute_reward(self, fold_result):
|
| 569 |
+
rewards = {}
|
| 570 |
+
rewards['format'] = 1.0
|
| 571 |
+
|
| 572 |
+
kawasaki = OrigamiVerifier.check_kawasaki(self.paper)
|
| 573 |
+
maekawa = OrigamiVerifier.check_maekawa(self.paper)
|
| 574 |
+
rewards['validity'] = (float(kawasaki) + float(maekawa)) / 2.0
|
| 575 |
+
|
| 576 |
+
rewards['progress'] = OrigamiVerifier.crease_pattern_similarity(
|
| 577 |
+
self.paper, self.target
|
| 578 |
+
)
|
| 579 |
+
|
| 580 |
+
if rewards['progress'] > 0.95:
|
| 581 |
+
rewards['completion'] = 10.0
|
| 582 |
+
|
| 583 |
+
rewards['efficiency'] = -0.01
|
| 584 |
+
|
| 585 |
+
rewards['total'] = (
|
| 586 |
+
0.1 * rewards['format'] +
|
| 587 |
+
0.2 * rewards['validity'] +
|
| 588 |
+
0.6 * rewards['progress'] +
|
| 589 |
+
rewards.get('completion', 0) +
|
| 590 |
+
rewards['efficiency']
|
| 591 |
+
)
|
| 592 |
+
return rewards
|
| 593 |
+
|
| 594 |
+
def _get_observation(self):
|
| 595 |
+
return {
|
| 596 |
+
'paper_state': self.paper.to_fold_json(),
|
| 597 |
+
'target': self.target,
|
| 598 |
+
'step': self.step_count,
|
| 599 |
+
'instruction_history': [str(f['line']) for f in self.paper.fold_history]
|
| 600 |
+
}
|
| 601 |
+
|
| 602 |
+
def _load_target(self, target_id):
|
| 603 |
+
if target_id:
|
| 604 |
+
with open(f"{self.targets_dir}/{target_id}.fold") as f:
|
| 605 |
+
return json.load(f)
|
| 606 |
+
# Default: simple valley fold in half
|
| 607 |
+
return {
|
| 608 |
+
'vertices_coords': [[0,0], [1,0], [1,1], [0,1], [0,0.5], [1,0.5]],
|
| 609 |
+
'edges_vertices': [[0,1], [1,2], [2,3], [3,0], [4,5]],
|
| 610 |
+
'edges_assignment': ['B', 'B', 'B', 'B', 'V'],
|
| 611 |
+
'edges_foldAngle': [0, 0, 0, 0, -180],
|
| 612 |
+
}
|
| 613 |
+
```
|
| 614 |
+
|
| 615 |
+
---
|
| 616 |
+
|
| 617 |
+
## Training Script (Unsloth GRPO)
|
| 618 |
+
|
| 619 |
+
```python
|
| 620 |
+
# train.py
|
| 621 |
+
from unsloth import FastLanguageModel
|
| 622 |
+
from trl import GRPOConfig, GRPOTrainer
|
| 623 |
+
import torch
|
| 624 |
+
|
| 625 |
+
# Load model
|
| 626 |
+
model, tokenizer = FastLanguageModel.from_pretrained(
|
| 627 |
+
model_name="unsloth/Qwen2.5-7B-Instruct",
|
| 628 |
+
max_seq_length=4096,
|
| 629 |
+
load_in_4bit=True,
|
| 630 |
+
)
|
| 631 |
+
|
| 632 |
+
# Add LoRA
|
| 633 |
+
model = FastLanguageModel.get_peft_model(
|
| 634 |
+
model,
|
| 635 |
+
r=32,
|
| 636 |
+
target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
|
| 637 |
+
"gate_proj", "up_proj", "down_proj"],
|
| 638 |
+
lora_alpha=32,
|
| 639 |
+
lora_dropout=0,
|
| 640 |
+
use_gradient_checkpointing="unsloth",
|
| 641 |
+
)
|
| 642 |
+
|
| 643 |
+
# Reward function
|
| 644 |
+
def origami_reward(completions, prompts):
|
| 645 |
+
"""Compute rewards for a batch of completions."""
|
| 646 |
+
rewards = []
|
| 647 |
+
for completion in completions:
|
| 648 |
+
try:
|
| 649 |
+
action = parse_fold_action(completion)
|
| 650 |
+
paper = PaperState()
|
| 651 |
+
result = paper.apply_fold(action['fold_line'], action['angle'], action['assignment'])
|
| 652 |
+
r = compute_reward(paper, target)
|
| 653 |
+
rewards.append(r['total'])
|
| 654 |
+
except Exception:
|
| 655 |
+
rewards.append(-0.1)
|
| 656 |
+
return rewards
|
| 657 |
+
|
| 658 |
+
# GRPO Config
|
| 659 |
+
config = GRPOConfig(
|
| 660 |
+
output_dir="origami-grpo",
|
| 661 |
+
num_train_epochs=3,
|
| 662 |
+
per_device_train_batch_size=4,
|
| 663 |
+
gradient_accumulation_steps=4,
|
| 664 |
+
learning_rate=5e-6,
|
| 665 |
+
max_completion_length=512,
|
| 666 |
+
num_generations=8,
|
| 667 |
+
temperature=1.0,
|
| 668 |
+
logging_steps=1,
|
| 669 |
+
)
|
| 670 |
+
|
| 671 |
+
dataset = load_origami_prompts()
|
| 672 |
+
|
| 673 |
+
trainer = GRPOTrainer(
|
| 674 |
+
model=model,
|
| 675 |
+
config=config,
|
| 676 |
+
train_dataset=dataset,
|
| 677 |
+
reward_funcs=[origami_reward],
|
| 678 |
+
tokenizer=tokenizer,
|
| 679 |
+
)
|
| 680 |
+
|
| 681 |
+
trainer.train()
|
| 682 |
+
```
|
| 683 |
+
|
| 684 |
+
---
|
| 685 |
+
|
| 686 |
+
## Visualization (Demo Only β Not in Training Loop)
|
| 687 |
+
|
| 688 |
+
### Options
|
| 689 |
+
|
| 690 |
+
1. **Origami Simulator** β https://github.com/amandaghassaei/OrigamiSimulator β Three.js, accepts FOLD files, shows folding animation with strain visualization
|
| 691 |
+
2. **PackCAD** β https://packcad.com/ β Web-based, SVG crease patterns, rigid folding simulation
|
| 692 |
+
3. **Custom Three.js** β Simpler but more control
|
| 693 |
+
|
| 694 |
+
### Demo UI Layout
|
| 695 |
+
|
| 696 |
+
```
|
| 697 |
+
+----------------------+----------------------+
|
| 698 |
+
| Instruction Stream | 3D Fold Viewer |
|
| 699 |
+
| | |
|
| 700 |
+
| Step 1: Valley fold | [Three.js canvas] |
|
| 701 |
+
| along center [OK] | |
|
| 702 |
+
| | Paper animating |
|
| 703 |
+
| Step 2: Fold top | fold by fold |
|
| 704 |
+
| corners to center | |
|
| 705 |
+
| | |
|
| 706 |
+
+----------------------+----------------------+
|
| 707 |
+
| Reward Dashboard |
|
| 708 |
+
| Format: ========== 1.0 |
|
| 709 |
+
| Validity: ========.. 0.8 |
|
| 710 |
+
| Progress: ======.... 0.6 |
|
| 711 |
+
| Total: =======... 0.72 |
|
| 712 |
+
| |
|
| 713 |
+
| [Reward curve over training steps] |
|
| 714 |
+
+----------------------------------------------+
|
| 715 |
+
```
|
| 716 |
+
|
| 717 |
+
---
|
| 718 |
+
|
| 719 |
+
## Key Libraries and Resources
|
| 720 |
+
|
| 721 |
+
| Tool | Purpose | Link |
|
| 722 |
+
|------|---------|------|
|
| 723 |
+
| OpenEnv | Environment framework | https://github.com/meta-pytorch/OpenEnv |
|
| 724 |
+
| Unsloth | GRPO training | https://github.com/unslothai/unsloth |
|
| 725 |
+
| OpenPipe ART | Multi-turn RL trainer | https://github.com/OpenPipe/ART |
|
| 726 |
+
| FOLD format | Origami data structure | https://github.com/edemaine/fold |
|
| 727 |
+
| Rabbit Ear | JS origami library | https://github.com/rabbit-ear/rabbit-ear |
|
| 728 |
+
| Origami Simulator | 3D visualization | https://github.com/amandaghassaei/OrigamiSimulator |
|
| 729 |
+
| PackCAD | Folding simulation | https://packcad.com/ |
|
| 730 |
+
| Shapely | Python geometry | pip install shapely |
|
| 731 |
+
| rigid-origami gym | Reference gym env | https://github.com/belalugaX/rigid-origami |
|
| 732 |
+
|
| 733 |
+
### Papers to Cite
|
| 734 |
+
|
| 735 |
+
- OrigamiSpace: https://arxiv.org/abs/2511.18450
|
| 736 |
+
- GamiBench: https://arxiv.org/abs/2512.22207
|
| 737 |
+
- SpatialThinker: https://arxiv.org/abs/2511.07403
|
| 738 |
+
- Automating Rigid Origami Design: https://arxiv.org/abs/2211.13219
|
| 739 |
+
- FOLD format spec: https://github.com/edemaine/fold/blob/main/doc/spec.md
|
| 740 |
+
|
| 741 |
+
---
|
| 742 |
+
|
| 743 |
+
## Priority Build Order
|
| 744 |
+
|
| 745 |
+
1. **Python geometry engine** β PaperState class with fold operations and FOLD export
|
| 746 |
+
2. **Verifier functions** β Kawasaki, Maekawa, similarity metrics
|
| 747 |
+
3. **OpenEnv wrapper** β step/reset/state API
|
| 748 |
+
4. **Simple targets** β Hand-create 5-10 Level 1-2 targets as .fold files
|
| 749 |
+
5. **Training script** β Wire up Unsloth GRPO with reward function
|
| 750 |
+
6. **Run training** β Even on small model, get reward curves
|
| 751 |
+
7. **Three.js visualizer** β For demo only, not in training loop
|
| 752 |
+
8. **Before/after demo** β Show base model vs trained model outputs
|
| 753 |
+
9. **Polish presentation narrative**
|
| 754 |
+
|
| 755 |
+
---
|
| 756 |
+
|
| 757 |
+
## Narrative for Judges
|
| 758 |
+
|
| 759 |
+
**The story arc:**
|
| 760 |
+
|
| 761 |
+
1. "LLMs are great at text but terrible at spatial reasoning"
|
| 762 |
+
2. "Origami is the perfect testbed β it's sequential, physical, and verifiable"
|
| 763 |
+
3. "NeurIPS 2025 showed even GPT-5 fails at origami benchmarks, but nobody built a TRAINING environment"
|
| 764 |
+
4. "We built OrigamiRL β the first multi-turn RL environment for origami instruction generation"
|
| 765 |
+
5. "Our rewards come from math theorems, not vibes β Kawasaki's theorem is our unit test"
|
| 766 |
+
6. "Watch the model go from generating paper-tearing nonsense to valid fold sequences"
|
| 767 |
+
7. "This generalizes to any domain where LLMs need to output structured physical instructions"
|
engine/fold_engine.py
CHANGED
|
@@ -151,6 +151,8 @@ def apply_fold(
|
|
| 151 |
elif face_sides[i] == "fixed" and face_sides[j] == "rotated":
|
| 152 |
new_paper.face_orders.append((j, i, 1))
|
| 153 |
|
|
|
|
|
|
|
| 154 |
return new_paper, None
|
| 155 |
|
| 156 |
|
|
@@ -205,3 +207,43 @@ def execute_fold_strategy(
|
|
| 205 |
applied.append(fold)
|
| 206 |
|
| 207 |
return current, applied, None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 151 |
elif face_sides[i] == "fixed" and face_sides[j] == "rotated":
|
| 152 |
new_paper.face_orders.append((j, i, 1))
|
| 153 |
|
| 154 |
+
new_paper.fold_count += 1
|
| 155 |
+
|
| 156 |
return new_paper, None
|
| 157 |
|
| 158 |
|
|
|
|
| 207 |
applied.append(fold)
|
| 208 |
|
| 209 |
return current, applied, None
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def apply_pleat(
|
| 213 |
+
paper: Paper,
|
| 214 |
+
line1: dict,
|
| 215 |
+
line2: dict,
|
| 216 |
+
angle: float = 180.0,
|
| 217 |
+
) -> tuple[Paper, str | None]:
|
| 218 |
+
"""Pleat fold: valley at line1, mountain at line2 (two parallel folds).
|
| 219 |
+
|
| 220 |
+
Both line dicts have the form: {"start": [x, y], "end": [x, y]}
|
| 221 |
+
Returns (new_paper, error_or_None).
|
| 222 |
+
"""
|
| 223 |
+
paper, err = apply_fold(paper, {"type": "valley", "line": line1, "angle": angle})
|
| 224 |
+
if err:
|
| 225 |
+
return paper, f"Pleat valley fold failed: {err}"
|
| 226 |
+
paper, err = apply_fold(paper, {"type": "mountain", "line": line2, "angle": angle})
|
| 227 |
+
if err:
|
| 228 |
+
return paper, f"Pleat mountain fold failed: {err}"
|
| 229 |
+
return paper, None
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def apply_crimp(
|
| 233 |
+
paper: Paper,
|
| 234 |
+
line1: dict,
|
| 235 |
+
line2: dict,
|
| 236 |
+
angle: float = 180.0,
|
| 237 |
+
) -> tuple[Paper, str | None]:
|
| 238 |
+
"""Crimp fold: mountain at line1, valley at line2 (reverse of pleat).
|
| 239 |
+
|
| 240 |
+
Both line dicts have the form: {"start": [x, y], "end": [x, y]}
|
| 241 |
+
Returns (new_paper, error_or_None).
|
| 242 |
+
"""
|
| 243 |
+
paper, err = apply_fold(paper, {"type": "mountain", "line": line1, "angle": angle})
|
| 244 |
+
if err:
|
| 245 |
+
return paper, f"Crimp mountain fold failed: {err}"
|
| 246 |
+
paper, err = apply_fold(paper, {"type": "valley", "line": line2, "angle": angle})
|
| 247 |
+
if err:
|
| 248 |
+
return paper, f"Crimp valley fold failed: {err}"
|
| 249 |
+
return paper, None
|
engine/metrics.py
CHANGED
|
@@ -102,3 +102,130 @@ def compute_metrics(paper: Paper, original_paper: Paper | None = None) -> dict:
|
|
| 102 |
"num_faces": len(paper.faces),
|
| 103 |
"num_layers": paper.num_layers,
|
| 104 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 102 |
"num_faces": len(paper.faces),
|
| 103 |
"num_layers": paper.num_layers,
|
| 104 |
}
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def compute_all_metrics(paper, task: dict, validation: dict) -> dict:
|
| 108 |
+
"""Compute every metric and return a flat dict.
|
| 109 |
+
|
| 110 |
+
Called after physics + validation. Combines validity, compactness,
|
| 111 |
+
structural, efficiency, and deployability metrics.
|
| 112 |
+
|
| 113 |
+
Parameters
|
| 114 |
+
----------
|
| 115 |
+
paper : Paper
|
| 116 |
+
Current paper state (after simulate()).
|
| 117 |
+
task : dict
|
| 118 |
+
Task definition with keys: width, height, target_ratio, target_box, must_deploy.
|
| 119 |
+
validation : dict
|
| 120 |
+
Output of validate_state(paper).
|
| 121 |
+
"""
|
| 122 |
+
import numpy as np
|
| 123 |
+
|
| 124 |
+
bb = paper.bounding_box # (3,) array
|
| 125 |
+
original_area = paper.original_area if paper.original_area > 0 else (paper.material.thickness_mm / 1000.0)
|
| 126 |
+
t = paper.material.thickness_mm / 1000.0
|
| 127 |
+
original_bbox_vol = original_area * t
|
| 128 |
+
folded_bbox_vol = float(bb[0] * bb[1] * bb[2]) if bb[2] > 0 else float(bb[0] * bb[1] * t)
|
| 129 |
+
|
| 130 |
+
# ββ Folded area (XY footprint) ββββββββββββββββββββββββββββββββ
|
| 131 |
+
if len(paper.vertices) >= 3:
|
| 132 |
+
try:
|
| 133 |
+
from scipy.spatial import ConvexHull
|
| 134 |
+
hull = ConvexHull(paper.vertices[:, :2])
|
| 135 |
+
folded_area = float(hull.volume)
|
| 136 |
+
except Exception:
|
| 137 |
+
ptp = np.ptp(paper.vertices[:, :2], axis=0)
|
| 138 |
+
folded_area = float(ptp[0] * ptp[1])
|
| 139 |
+
else:
|
| 140 |
+
folded_area = original_area
|
| 141 |
+
|
| 142 |
+
deployment_ratio = folded_area / original_area if original_area > 0 else 1.0
|
| 143 |
+
compactness = 1.0 - deployment_ratio
|
| 144 |
+
volume_compaction = folded_bbox_vol / original_bbox_vol if original_bbox_vol > 0 else 1.0
|
| 145 |
+
material_volume = original_area * t
|
| 146 |
+
packing_efficiency = material_volume / folded_bbox_vol if folded_bbox_vol > 0 else 0.0
|
| 147 |
+
|
| 148 |
+
# ββ Target box check βββββββββββββββββββββββββββββββββββββββββ
|
| 149 |
+
target_box = task.get("target_box")
|
| 150 |
+
fits_target_box = False
|
| 151 |
+
if target_box and len(target_box) == 3:
|
| 152 |
+
fits_target_box = bool(
|
| 153 |
+
bb[0] <= target_box[0] + 1e-6 and
|
| 154 |
+
bb[1] <= target_box[1] + 1e-6 and
|
| 155 |
+
bb[2] <= target_box[2] + 1e-6
|
| 156 |
+
)
|
| 157 |
+
|
| 158 |
+
# ββ Strain βββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 159 |
+
strain = paper.strain_per_vertex
|
| 160 |
+
max_strain = float(np.max(strain)) if len(strain) > 0 else 0.0
|
| 161 |
+
mean_strain = float(np.mean(strain)) if len(strain) > 0 else 0.0
|
| 162 |
+
|
| 163 |
+
# ββ Energy βββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 164 |
+
energy = paper.energy
|
| 165 |
+
|
| 166 |
+
# ββ Efficiency βββββββββββββββββββββββββββββββββββββββββββββββ
|
| 167 |
+
fold_count = paper.fold_count
|
| 168 |
+
|
| 169 |
+
# Crease complexity: entropy of M/V assignment distribution
|
| 170 |
+
mv_assignments = [a for a in paper.assignments if a in ("M", "V")]
|
| 171 |
+
if mv_assignments:
|
| 172 |
+
total = len(mv_assignments)
|
| 173 |
+
m_count = mv_assignments.count("M")
|
| 174 |
+
v_count = mv_assignments.count("V")
|
| 175 |
+
p_m = m_count / total if total > 0 else 0
|
| 176 |
+
p_v = v_count / total if total > 0 else 0
|
| 177 |
+
crease_complexity = 0.0
|
| 178 |
+
if p_m > 0:
|
| 179 |
+
crease_complexity -= p_m * np.log2(p_m)
|
| 180 |
+
if p_v > 0:
|
| 181 |
+
crease_complexity -= p_v * np.log2(p_v)
|
| 182 |
+
else:
|
| 183 |
+
crease_complexity = 0.0
|
| 184 |
+
|
| 185 |
+
folding_efficiency = compactness / max(fold_count, 1)
|
| 186 |
+
|
| 187 |
+
# ββ Deployability βββββββββββββββββββββββββββββββββββββββββββββ
|
| 188 |
+
must_deploy = task.get("must_deploy", False)
|
| 189 |
+
# Simple deployability heuristic: if valid and compactness > 0, assume deployable
|
| 190 |
+
is_deployable = bool(validation.get("is_valid", False) and compactness > 0.01) if must_deploy else None
|
| 191 |
+
# Deployment force estimate from total energy gradient (rough)
|
| 192 |
+
deployment_force_estimate = float(energy.get("fold", 0.0)) / max(paper.original_area, 1e-6)
|
| 193 |
+
|
| 194 |
+
return {
|
| 195 |
+
# Validity (from validation dict)
|
| 196 |
+
"is_valid": validation.get("is_valid", False),
|
| 197 |
+
"kawasaki_violations": validation.get("kawasaki_violations", 0),
|
| 198 |
+
"kawasaki_total_error": validation.get("kawasaki_total_error", 0.0),
|
| 199 |
+
"maekawa_violations": validation.get("maekawa_violations", 0),
|
| 200 |
+
"self_intersections": validation.get("self_intersections", 0),
|
| 201 |
+
"strain_exceeded": validation.get("strain_exceeded", False),
|
| 202 |
+
|
| 203 |
+
# Compactness
|
| 204 |
+
"deployment_ratio": float(deployment_ratio),
|
| 205 |
+
"compactness": float(compactness),
|
| 206 |
+
"volume_compaction": float(volume_compaction),
|
| 207 |
+
"packing_efficiency": float(packing_efficiency),
|
| 208 |
+
"fits_target_box": fits_target_box,
|
| 209 |
+
"bounding_box": bb.tolist(),
|
| 210 |
+
|
| 211 |
+
# Structural
|
| 212 |
+
"max_strain": max_strain,
|
| 213 |
+
"mean_strain": mean_strain,
|
| 214 |
+
"total_energy": float(energy.get("total", 0.0)),
|
| 215 |
+
"energy_bar": float(energy.get("bar", 0.0)),
|
| 216 |
+
"energy_facet": float(energy.get("facet", 0.0)),
|
| 217 |
+
"energy_fold": float(energy.get("fold", 0.0)),
|
| 218 |
+
|
| 219 |
+
# Efficiency
|
| 220 |
+
"fold_count": fold_count,
|
| 221 |
+
"folding_efficiency": float(folding_efficiency),
|
| 222 |
+
"crease_complexity": float(crease_complexity),
|
| 223 |
+
|
| 224 |
+
# Deployability
|
| 225 |
+
"is_deployable": is_deployable,
|
| 226 |
+
"deployment_force_estimate": float(deployment_force_estimate),
|
| 227 |
+
|
| 228 |
+
# Shape similarity placeholders
|
| 229 |
+
"chamfer_distance": None,
|
| 230 |
+
"hausdorff_distance": None,
|
| 231 |
+
}
|
engine/paper.py
CHANGED
|
@@ -89,6 +89,10 @@ class Paper:
|
|
| 89 |
material: Material = field(default_factory=lambda: get_material("paper"))
|
| 90 |
rest_lengths: np.ndarray = field(default_factory=lambda: np.empty(0))
|
| 91 |
original_area: float = 0.0
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
|
| 93 |
# ββ constructors ββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 94 |
|
|
@@ -125,7 +129,7 @@ class Paper:
|
|
| 125 |
dtype=np.float64,
|
| 126 |
)
|
| 127 |
|
| 128 |
-
|
| 129 |
vertices=verts,
|
| 130 |
edges=edges,
|
| 131 |
faces=faces,
|
|
@@ -135,6 +139,8 @@ class Paper:
|
|
| 135 |
rest_lengths=rest_lengths,
|
| 136 |
original_area=width * height,
|
| 137 |
)
|
|
|
|
|
|
|
| 138 |
|
| 139 |
# ββ dict / prompt serialization (matches mock_env.PaperState.to_dict) ββ
|
| 140 |
|
|
@@ -165,6 +171,33 @@ class Paper:
|
|
| 165 |
},
|
| 166 |
}
|
| 167 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 168 |
# ββ FOLD format serialization βββββββββββββββββββββββββββββββββββ
|
| 169 |
|
| 170 |
def to_fold_json(self) -> str:
|
|
@@ -485,4 +518,8 @@ class Paper:
|
|
| 485 |
),
|
| 486 |
rest_lengths=self.rest_lengths.copy(),
|
| 487 |
original_area=self.original_area,
|
|
|
|
|
|
|
|
|
|
|
|
|
| 488 |
)
|
|
|
|
| 89 |
material: Material = field(default_factory=lambda: get_material("paper"))
|
| 90 |
rest_lengths: np.ndarray = field(default_factory=lambda: np.empty(0))
|
| 91 |
original_area: float = 0.0
|
| 92 |
+
rest_positions: np.ndarray = field(default_factory=lambda: np.empty((0, 3)))
|
| 93 |
+
strain_per_vertex: np.ndarray = field(default_factory=lambda: np.empty(0))
|
| 94 |
+
energy: dict = field(default_factory=lambda: {"total": 0.0, "bar": 0.0, "facet": 0.0, "fold": 0.0})
|
| 95 |
+
fold_count: int = 0
|
| 96 |
|
| 97 |
# ββ constructors ββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 98 |
|
|
|
|
| 129 |
dtype=np.float64,
|
| 130 |
)
|
| 131 |
|
| 132 |
+
paper = Paper(
|
| 133 |
vertices=verts,
|
| 134 |
edges=edges,
|
| 135 |
faces=faces,
|
|
|
|
| 139 |
rest_lengths=rest_lengths,
|
| 140 |
original_area=width * height,
|
| 141 |
)
|
| 142 |
+
paper.rest_positions = verts.copy()
|
| 143 |
+
return paper
|
| 144 |
|
| 145 |
# ββ dict / prompt serialization (matches mock_env.PaperState.to_dict) ββ
|
| 146 |
|
|
|
|
| 171 |
},
|
| 172 |
}
|
| 173 |
|
| 174 |
+
def to_observation_dict(self) -> dict:
|
| 175 |
+
bb = self.bounding_box
|
| 176 |
+
return {
|
| 177 |
+
"vertices_coords": self.vertices.tolist(),
|
| 178 |
+
"edges_vertices": self.edges.tolist(),
|
| 179 |
+
"faces_vertices": self.faces,
|
| 180 |
+
"edges_assignment": list(self.assignments),
|
| 181 |
+
"edges_foldAngle": self.fold_angles.tolist(),
|
| 182 |
+
"num_vertices": len(self.vertices),
|
| 183 |
+
"num_edges": len(self.edges),
|
| 184 |
+
"num_faces": len(self.faces),
|
| 185 |
+
"bounding_box": bb.tolist(),
|
| 186 |
+
"num_layers": self.num_layers,
|
| 187 |
+
"material": {
|
| 188 |
+
"name": self.material.name,
|
| 189 |
+
"thickness_mm": self.material.thickness_mm,
|
| 190 |
+
"youngs_modulus_gpa": self.material.youngs_modulus_gpa,
|
| 191 |
+
"max_strain": self.material.max_strain,
|
| 192 |
+
"poisson_ratio": self.material.poissons_ratio,
|
| 193 |
+
},
|
| 194 |
+
"strain_per_vertex": self.strain_per_vertex.tolist(),
|
| 195 |
+
"energy": dict(self.energy),
|
| 196 |
+
"fold_count": self.fold_count,
|
| 197 |
+
"width": float(self.original_area ** 0.5) if self.original_area > 0 else 1.0,
|
| 198 |
+
"height": float(self.original_area ** 0.5) if self.original_area > 0 else 1.0,
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
# ββ FOLD format serialization βββββββββββββββββββββββββββββββββββ
|
| 202 |
|
| 203 |
def to_fold_json(self) -> str:
|
|
|
|
| 518 |
),
|
| 519 |
rest_lengths=self.rest_lengths.copy(),
|
| 520 |
original_area=self.original_area,
|
| 521 |
+
rest_positions=self.rest_positions.copy(),
|
| 522 |
+
strain_per_vertex=self.strain_per_vertex.copy(),
|
| 523 |
+
energy=dict(self.energy),
|
| 524 |
+
fold_count=self.fold_count,
|
| 525 |
)
|
engine/physics.py
CHANGED
|
@@ -255,3 +255,263 @@ def _face_normal(verts: np.ndarray, face: list[int]) -> np.ndarray | None:
|
|
| 255 |
if norm < 1e-15:
|
| 256 |
return None
|
| 257 |
return normal / norm
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 255 |
if norm < 1e-15:
|
| 256 |
return None
|
| 257 |
return normal / norm
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 261 |
+
# Topology precomputation
|
| 262 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 263 |
+
|
| 264 |
+
def build_beam_list(paper: Paper) -> list[tuple[int, int, float, float]]:
|
| 265 |
+
"""Build list of (node_a, node_b, rest_len, k_axial) for every edge.
|
| 266 |
+
|
| 267 |
+
Uses normalized stiffness values (arch doc constants) scaled by material
|
| 268 |
+
Young's modulus ratio β keeps the Verlet integrator stable at unit scale.
|
| 269 |
+
"""
|
| 270 |
+
# Normalized stiffness constants (arch doc values)
|
| 271 |
+
K_AXIAL_BASE = 70.0
|
| 272 |
+
# Scale by material: paper (3 GPa) = 1.0 baseline
|
| 273 |
+
mat = paper.material
|
| 274 |
+
E_ratio = mat.youngs_modulus_gpa / 3.0
|
| 275 |
+
k_axial = K_AXIAL_BASE * E_ratio
|
| 276 |
+
|
| 277 |
+
beams = []
|
| 278 |
+
for ei, (v1, v2) in enumerate(paper.edges):
|
| 279 |
+
L0 = paper.rest_lengths[ei]
|
| 280 |
+
beams.append((int(v1), int(v2), float(L0), float(k_axial)))
|
| 281 |
+
return beams
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def build_crease_list(paper: Paper) -> list[tuple[int, int, int, int, float, float, str]]:
|
| 285 |
+
"""Build list of (n1, n2, n3, n4, target_angle_rad, k, type) for each crease hinge.
|
| 286 |
+
|
| 287 |
+
Each hinge is defined by 4 nodes: n1-n2 is the hinge edge, n3 and n4 are
|
| 288 |
+
the wing-tip nodes of the two adjacent faces.
|
| 289 |
+
type is 'fold' (M/V crease) or 'facet' (interior flat edge).
|
| 290 |
+
"""
|
| 291 |
+
verts = paper.vertices
|
| 292 |
+
|
| 293 |
+
# Build edge β face adjacency
|
| 294 |
+
edge_faces: dict[int, list[int]] = {}
|
| 295 |
+
for fi, face in enumerate(paper.faces):
|
| 296 |
+
n = len(face)
|
| 297 |
+
for k in range(n):
|
| 298 |
+
va, vb = face[k], face[(k + 1) % n]
|
| 299 |
+
for ei, e in enumerate(paper.edges):
|
| 300 |
+
if (e[0] == va and e[1] == vb) or (e[0] == vb and e[1] == va):
|
| 301 |
+
edge_faces.setdefault(ei, []).append(fi)
|
| 302 |
+
break
|
| 303 |
+
|
| 304 |
+
creases = []
|
| 305 |
+
for ei, adj in edge_faces.items():
|
| 306 |
+
if len(adj) < 2:
|
| 307 |
+
continue
|
| 308 |
+
f1, f2 = adj[0], adj[1]
|
| 309 |
+
face1, face2 = paper.faces[f1], paper.faces[f2]
|
| 310 |
+
n1, n2 = int(paper.edges[ei][0]), int(paper.edges[ei][1])
|
| 311 |
+
|
| 312 |
+
# Find wing-tip nodes (in each face, the vertex NOT on the shared edge)
|
| 313 |
+
wing1 = [v for v in face1 if v != n1 and v != n2]
|
| 314 |
+
wing2 = [v for v in face2 if v != n1 and v != n2]
|
| 315 |
+
if not wing1 or not wing2:
|
| 316 |
+
continue
|
| 317 |
+
n3, n4 = int(wing1[0]), int(wing2[0])
|
| 318 |
+
|
| 319 |
+
# Normalized stiffness constants (arch doc values), scaled by material
|
| 320 |
+
E_ratio = paper.material.youngs_modulus_gpa / 3.0
|
| 321 |
+
K_FACET = 0.2 * E_ratio
|
| 322 |
+
K_FOLD = 0.7 * E_ratio
|
| 323 |
+
|
| 324 |
+
asgn = paper.assignments[ei]
|
| 325 |
+
if asgn in ("M", "V"):
|
| 326 |
+
target = float(np.radians(paper.fold_angles[ei]))
|
| 327 |
+
k = K_FOLD
|
| 328 |
+
ctype = "fold"
|
| 329 |
+
else:
|
| 330 |
+
target = float(np.pi)
|
| 331 |
+
k = K_FACET
|
| 332 |
+
ctype = "facet"
|
| 333 |
+
|
| 334 |
+
creases.append((n1, n2, n3, n4, target, k, ctype))
|
| 335 |
+
return creases
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def _torque_to_forces(
|
| 339 |
+
p1: np.ndarray, p2: np.ndarray,
|
| 340 |
+
p3: np.ndarray, p4: np.ndarray,
|
| 341 |
+
torque: float,
|
| 342 |
+
) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
|
| 343 |
+
"""Convert a dihedral torque into forces on the 4 hinge nodes.
|
| 344 |
+
|
| 345 |
+
p1-p2 is the hinge edge. p3 and p4 are wing tips.
|
| 346 |
+
Returns (f1, f2, f3, f4) as (3,) arrays.
|
| 347 |
+
"""
|
| 348 |
+
e = p2 - p1
|
| 349 |
+
e_len = np.linalg.norm(e)
|
| 350 |
+
if e_len < 1e-12:
|
| 351 |
+
zero = np.zeros(3)
|
| 352 |
+
return zero, zero, zero, zero
|
| 353 |
+
|
| 354 |
+
e_hat = e / e_len
|
| 355 |
+
|
| 356 |
+
# Perpendicular components of wing vectors relative to hinge
|
| 357 |
+
d3 = p3 - p1
|
| 358 |
+
d4 = p4 - p1
|
| 359 |
+
d3_perp = d3 - np.dot(d3, e_hat) * e_hat
|
| 360 |
+
d4_perp = d4 - np.dot(d4, e_hat) * e_hat
|
| 361 |
+
|
| 362 |
+
len3 = np.linalg.norm(d3_perp)
|
| 363 |
+
len4 = np.linalg.norm(d4_perp)
|
| 364 |
+
|
| 365 |
+
if len3 < 1e-12 or len4 < 1e-12:
|
| 366 |
+
zero = np.zeros(3)
|
| 367 |
+
return zero, zero, zero, zero
|
| 368 |
+
|
| 369 |
+
# Force on wing tips proportional to torque / lever arm
|
| 370 |
+
f3 = torque / (len3 * e_len) * np.cross(e_hat, d3_perp / len3)
|
| 371 |
+
f4 = -torque / (len4 * e_len) * np.cross(e_hat, d4_perp / len4)
|
| 372 |
+
|
| 373 |
+
# Reaction forces distributed to hinge nodes
|
| 374 |
+
f1 = -(f3 + f4) * 0.5
|
| 375 |
+
f2 = -(f3 + f4) * 0.5
|
| 376 |
+
|
| 377 |
+
return f1, f2, f3, f4
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 381 |
+
# Verlet solver
|
| 382 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 383 |
+
|
| 384 |
+
def simulate(
    paper: Paper,
    fold_percent: float = 1.0,
    n_steps: int = 500,
    dt: float = 0.005,
    damping: float = 0.15,
) -> Paper:
    """Run bar-and-hinge Verlet integration to relax the mesh.

    Updates paper.vertices, paper.strain_per_vertex, and paper.energy in-place.
    Returns the mutated paper for chaining.

    Parameters
    ----------
    paper : Paper
        Paper state after a fold has been applied (vertices already rotated).
    fold_percent : float
        How far along the fold to drive (0=flat, 1=full target angle).
        Only 'fold' creases are scaled; 'facet' hinges keep their flat target.
    n_steps : int
        Maximum integration steps; the loop may exit early on convergence
        or if the positions go NaN.
    dt : float
        Time step. Keep small (0.005) for stability with stiff materials.
    damping : float
        Velocity damping coefficient (0=undamped, 1=fully damped), applied
        to the implicit Verlet velocity term (pos - last_pos).
    """
    if len(paper.vertices) == 0:
        return paper

    # Spring topology derived from the current crease pattern.
    beams = build_beam_list(paper)
    creases = build_crease_list(paper)

    pos = paper.vertices.copy()       # (N, 3) current positions
    last_pos = pos.copy()             # (N, 3) previous positions (Verlet)

    max_force_cap = 1e6  # clamp bound to prevent runaway forces

    for _ in range(n_steps):
        forces = np.zeros_like(pos)

        # -- Beam (axial spring) forces --------------------------------
        # Each beam resists stretching/compression along its axis.
        for (a, b, L0, k) in beams:
            delta = pos[b] - pos[a]
            L = np.linalg.norm(delta)
            if L < 1e-12:
                # Degenerate (coincident endpoints): no defined direction.
                continue
            strain = (L - L0) / L0
            F_mag = k * strain
            F_vec = F_mag * (delta / L)
            # Clamp to prevent instability
            F_vec = np.clip(F_vec, -max_force_cap, max_force_cap)
            forces[a] += F_vec
            forces[b] -= F_vec

        # -- Crease (dihedral spring) forces ---------------------------
        # Each hinge drives its dihedral angle toward its target; 'fold'
        # targets are interpolated by fold_percent, 'facet' hinges stay flat.
        for (n1, n2, n3, n4, target, k, ctype) in creases:
            actual_target = target * fold_percent if ctype == "fold" else target
            try:
                theta = _compute_dihedral_rad(pos[n1], pos[n2], pos[n3], pos[n4])
            except Exception:
                # Skip hinges whose geometry is momentarily degenerate.
                continue
            delta_theta = theta - actual_target
            edge_len = np.linalg.norm(pos[n2] - pos[n1])
            # Restoring torque proportional to angular error and hinge length.
            torque = k * edge_len * delta_theta
            torque = float(np.clip(torque, -max_force_cap, max_force_cap))

            f1, f2, f3, f4 = _torque_to_forces(
                pos[n1], pos[n2], pos[n3], pos[n4], torque
            )
            forces[n1] += np.clip(f1, -max_force_cap, max_force_cap)
            forces[n2] += np.clip(f2, -max_force_cap, max_force_cap)
            forces[n3] += np.clip(f3, -max_force_cap, max_force_cap)
            forces[n4] += np.clip(f4, -max_force_cap, max_force_cap)

        # -- Verlet integration ----------------------------------------
        # Position-based Verlet: (pos - last_pos) acts as the velocity,
        # scaled by (1 - damping); unit mass is assumed.
        new_pos = pos + (1.0 - damping) * (pos - last_pos) + forces * (dt * dt)

        # NaN guard: abandon the step loop rather than propagate NaNs.
        if np.any(np.isnan(new_pos)):
            break

        last_pos = pos
        pos = new_pos

        # -- Convergence check -----------------------------------------
        # Stop once total squared displacement (pseudo kinetic energy)
        # falls below threshold.
        kinetic = np.sum((pos - last_pos) ** 2)
        if kinetic < 1e-12:
            break

    # -- Write results back to paper -----------------------------------
    paper.vertices = pos
    paper.strain_per_vertex = compute_strain(paper)
    paper.energy = {
        "total": compute_total_energy(paper),
        "bar": compute_bar_energy(paper),
        "facet": compute_facet_energy(paper),
        "fold": compute_fold_energy(paper),
    }

    return paper
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
def _compute_dihedral_rad(
|
| 486 |
+
p1: np.ndarray, p2: np.ndarray,
|
| 487 |
+
p3: np.ndarray, p4: np.ndarray,
|
| 488 |
+
) -> float:
|
| 489 |
+
"""Dihedral angle in radians between planes (p1,p2,p3) and (p1,p2,p4).
|
| 490 |
+
|
| 491 |
+
p1-p2 is the hinge edge. p3 and p4 are the wing tips.
|
| 492 |
+
Returns angle in [0, 2*pi).
|
| 493 |
+
"""
|
| 494 |
+
e = p2 - p1
|
| 495 |
+
e_norm = np.linalg.norm(e)
|
| 496 |
+
if e_norm < 1e-12:
|
| 497 |
+
return float(np.pi)
|
| 498 |
+
e_hat = e / e_norm
|
| 499 |
+
|
| 500 |
+
n1 = np.cross(p3 - p1, e)
|
| 501 |
+
n2 = np.cross(e, p4 - p1)
|
| 502 |
+
len1 = np.linalg.norm(n1)
|
| 503 |
+
len2 = np.linalg.norm(n2)
|
| 504 |
+
if len1 < 1e-12 or len2 < 1e-12:
|
| 505 |
+
return float(np.pi)
|
| 506 |
+
|
| 507 |
+
n1 = n1 / len1
|
| 508 |
+
n2 = n2 / len2
|
| 509 |
+
|
| 510 |
+
cos_a = float(np.clip(np.dot(n1, n2), -1.0, 1.0))
|
| 511 |
+
angle = np.arccos(cos_a)
|
| 512 |
+
|
| 513 |
+
cross = np.cross(n1, n2)
|
| 514 |
+
if np.dot(cross, e_hat) < 0:
|
| 515 |
+
angle = 2.0 * np.pi - angle
|
| 516 |
+
|
| 517 |
+
return float(angle)
|
engine/validation.py
CHANGED
|
@@ -254,3 +254,25 @@ def validate_paper(paper: Paper) -> ValidationResult:
|
|
| 254 |
self_intersection_count=si_count,
|
| 255 |
is_valid=k_valid and m_valid and si_valid,
|
| 256 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 254 |
self_intersection_count=si_count,
|
| 255 |
is_valid=k_valid and m_valid and si_valid,
|
| 256 |
)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def validate_state(paper: Paper) -> dict:
    """Run all validation checks and flatten the results into a dict.

    This is the interface used by OrigamiEnvironment: it delegates to
    validate_paper() and augments the result with a material-strain
    check, returning every field the environment and metrics need.
    """
    checks = validate_paper(paper)

    strains = paper.strain_per_vertex
    strain_exceeded = bool(
        len(strains) > 0 and float(strains.max()) > paper.material.max_strain
    )

    return {
        "is_valid": checks.is_valid and not strain_exceeded,
        "kawasaki_violations": int(not checks.kawasaki_valid),
        "kawasaki_total_error": float(checks.kawasaki_violation),
        "maekawa_violations": int(not checks.maekawa_valid),
        "self_intersections": checks.self_intersection_count,
        "strain_exceeded": strain_exceeded,
    }
|
env/__init__.py
ADDED
|
File without changes
|
env/environment.py
ADDED
|
@@ -0,0 +1,243 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import copy
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
from .paper_state import PaperState
|
| 8 |
+
from .rewards import compute_reward, compute_terminal_reward, load_target, target_crease_edges
|
| 9 |
+
from .prompts import (
|
| 10 |
+
code_as_policy_prompt,
|
| 11 |
+
step_level_prompt,
|
| 12 |
+
parse_fold_list,
|
| 13 |
+
parse_single_fold,
|
| 14 |
+
)
|
| 15 |
+
from .verifier import check_all_vertices
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
TARGETS_DIR = Path(__file__).parent / 'targets'
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class OrigamiEnvironment:
    """
    OpenEnv-compatible origami crease pattern environment.

    Supports two modes:
    - code_as_policy: model outputs complete fold sequence, gets terminal reward
    - step: model outputs one fold at a time, gets per-step reward

    Episodes are parameterized by a target crease pattern loaded from the
    .fold files in ``targets_dir``.
    """

    def __init__(
        self,
        mode: str = 'code_as_policy',  # 'code_as_policy' or 'step'
        max_steps: int = 8,
        targets_dir: Optional[str] = None,
    ):
        assert mode in ('code_as_policy', 'step'), f"Unknown mode: {mode}"
        self.mode = mode
        self.max_steps = max_steps
        self.targets_dir = Path(targets_dir) if targets_dir else TARGETS_DIR

        # Per-episode state; populated by reset().
        self.paper: Optional[PaperState] = None
        self.target: Optional[dict] = None
        self.target_name: Optional[str] = None
        self.step_count: int = 0
        self.last_reward: Optional[dict] = None

        # Cache all available targets
        self._targets = self._load_all_targets()

    def _load_all_targets(self) -> dict[str, dict]:
        """Load every *.fold JSON file in targets_dir, keyed by file stem."""
        targets = {}
        for fold_file in self.targets_dir.glob('*.fold'):
            with open(fold_file) as f:
                targets[fold_file.stem] = json.load(f)
        return targets

    def available_targets(self) -> list[str]:
        """Sorted names of all loadable targets."""
        return sorted(self._targets.keys())

    def reset(self, target_name: Optional[str] = None) -> dict:
        """
        Reset environment to start of a new episode.

        Args:
            target_name: name of target (stem of .fold file). If None, picks level-1 randomly.

        Returns:
            observation dict with 'prompt' key containing the LLM prompt string.
        """
        import random

        if target_name:
            assert target_name in self._targets, f"Unknown target: {target_name}"
            self.target_name = target_name
        else:
            # Default to level-1 targets; fall back to any target if none exist.
            level1 = [k for k, v in self._targets.items() if v.get('level', 1) == 1]
            self.target_name = random.choice(level1 if level1 else list(self._targets.keys()))

        self.target = self._targets[self.target_name]
        self.paper = PaperState()
        self.step_count = 0
        self.last_reward = None

        return self._get_observation()

    def step(self, action) -> tuple[dict, dict, bool, dict]:
        """
        Execute an action.

        In code_as_policy mode: action is a string (model completion with <folds> tags)
        OR a list of fold dicts already parsed.
        In step mode: action is a string (single fold JSON) or dict.

        Returns:
            (observation, reward, done, info)
        """
        if self.mode == 'code_as_policy':
            return self._step_sequence(action)
        else:
            return self._step_single(action)

    def _step_sequence(self, action) -> tuple[dict, dict, bool, dict]:
        """Execute a complete fold sequence (code-as-policy mode).

        Always terminal: the episode ends after this single call, with a
        terminal reward computed on whatever state the folds produced.
        """
        # Parse action if it's a string
        if isinstance(action, str):
            try:
                folds = parse_fold_list(action)
            except ValueError as e:
                # Unparseable completion: small penalty, episode over.
                bad_reward = {'format': 0.0, 'total': -0.1, 'error': str(e)}
                return self._get_observation(), bad_reward, True, self._info()
        else:
            folds = action  # already a list of dicts

        # Execute each fold sequentially
        last_result = {'valid': True, 'anchored': True, 'new_vertices': [], 'errors': []}
        for fold in folds:
            try:
                p1 = fold['from']
                p2 = fold['to']
                assignment = fold['assignment']
            except (KeyError, TypeError) as e:
                # Malformed fold dict: record the error and stop executing.
                last_result = {'valid': False, 'anchored': False, 'new_vertices': [], 'errors': [str(e)]}
                break

            last_result = self.paper.add_crease(p1, p2, assignment)
            self.step_count += 1
            if not last_result['valid']:
                break  # stop at first invalid fold, partial credit

        reward = compute_terminal_reward(self.paper, self.target)
        self.last_reward = reward
        return self._get_observation(), reward, True, self._info()

    def _step_single(self, action) -> tuple[dict, dict, bool, dict]:
        """Execute a single fold (step mode).

        Episode ends when max_steps is reached or the reward reports a
        positive 'completion' component.
        """
        if isinstance(action, str):
            try:
                fold = parse_single_fold(action)
            except ValueError as e:
                # Unparseable action: penalize but let the episode continue
                # unless the step budget is exhausted.
                bad_reward = {'format': 0.0, 'total': -0.1, 'error': str(e)}
                self.last_reward = bad_reward
                done = self.step_count >= self.max_steps
                return self._get_observation(), bad_reward, done, self._info()
        else:
            fold = action

        try:
            p1 = fold['from']
            p2 = fold['to']
            assignment = fold['assignment']
        except (KeyError, TypeError) as e:
            # Malformed fold dict: same penalty path as a parse failure.
            bad_reward = {'format': 0.0, 'total': -0.1, 'error': str(e)}
            self.last_reward = bad_reward
            done = self.step_count >= self.max_steps
            return self._get_observation(), bad_reward, done, self._info()

        result = self.paper.add_crease(p1, p2, assignment)
        self.step_count += 1

        reward = compute_reward(self.paper, result, self.target)
        self.last_reward = reward

        done = (
            self.step_count >= self.max_steps or
            reward.get('completion', 0) > 0
        )
        return self._get_observation(), reward, done, self._info()

    def _get_observation(self) -> dict:
        """Returns observation dict with the LLM prompt and raw state."""
        if self.mode == 'code_as_policy':
            prompt = code_as_policy_prompt(self.target, max_folds=self.max_steps)
        else:
            prompt = step_level_prompt(
                target=self.target,
                paper_state=self.paper,
                step=self.step_count,
                max_steps=self.max_steps,
                last_reward=self.last_reward,
            )

        return {
            'prompt': prompt,
            'target_name': self.target_name,
            'step': self.step_count,
            'paper_fold_json': self.paper.graph.edges if self.paper else {},
        }

    def _info(self) -> dict:
        """Returns diagnostic info dict for logging."""
        if self.paper is None:
            return {}

        interior = self.paper.graph.interior_vertices()
        vertex_scores = check_all_vertices(self.paper.graph)

        return {
            'local_foldability': (
                vertex_scores['kawasaki'] == 1.0 and
                vertex_scores['maekawa'] == 1.0
            ),
            'blb_satisfied': vertex_scores['blb'] == 1.0,
            'global_foldability': 'not_checked',  # NP-complete (Bern-Hayes 1996)
            'n_interior_vertices': len(interior),
            'n_creases': len(self.paper.graph.crease_edges()),
            'target_name': self.target_name,
        }

    def state(self) -> dict:
        """Returns current environment state for logging/inspection.

        NOTE(review): assumes reset() has been called — accesses self.paper
        without a None guard; confirm callers respect that.
        """
        return {
            'paper': {
                'vertices': dict(self.paper.graph.vertices),
                'edges': {
                    k: v for k, v in self.paper.graph.edges.items()
                    if v[2] in ('M', 'V')
                },
                'fold_history': self.paper.fold_history,
            },
            'target': self.target_name,
            'step': self.step_count,
            'mode': self.mode,
        }

    def close(self):
        """Cleanup. No resources held; provided for OpenEnv API compatibility."""
        pass

    def clone(self) -> 'OrigamiEnvironment':
        """Return a deep copy for parallel evaluation (used in GRPO).

        The paper state is deep-copied; the target dict is shared (treated
        as read-only).
        """
        new_env = OrigamiEnvironment(
            mode=self.mode,
            max_steps=self.max_steps,
            targets_dir=str(self.targets_dir),
        )
        if self.paper is not None:
            new_env.paper = copy.deepcopy(self.paper)
        new_env.target = self.target
        new_env.target_name = self.target_name
        new_env.step_count = self.step_count
        new_env.last_reward = self.last_reward
        return new_env
|
env/graph.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from typing import Optional
|
| 3 |
+
|
| 4 |
+
BOUNDARY_TOL = 1e-9
VERTEX_TOL = 1e-9


class CreaseGraph:
    """
    Planar graph representing an origami crease pattern on a unit square.

    Vertices: points in [0,1]x[0,1], deduplicated by proximity.
    Edges: segments between vertices, labeled M (mountain), V (valley),
    or B (boundary). A fresh graph is seeded with the four corners and
    the four boundary edges of the unit square.
    """

    def __init__(self):
        # id -> data maps plus a per-vertex incidence index.
        self.vertices: dict[int, tuple[float, float]] = {}
        self.edges: dict[int, tuple[int, int, str]] = {}
        self.vertex_edges: dict[int, list[int]] = {}
        self._next_vertex_id: int = 0
        self._next_edge_id: int = 0

        # Seed the four corner vertices (ids 0..3, counterclockwise).
        for corner in [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]:
            new_vid = self._next_vertex_id
            self._next_vertex_id += 1
            self.vertices[new_vid] = corner
            self.vertex_edges[new_vid] = []

        # Seed the four boundary edges (ids 0..3).
        for end_a, end_b in ((0, 1), (1, 2), (2, 3), (3, 0)):
            new_eid = self._next_edge_id
            self._next_edge_id += 1
            self.edges[new_eid] = (end_a, end_b, 'B')
            self.vertex_edges[end_a].append(new_eid)
            self.vertex_edges[end_b].append(new_eid)

    def add_vertex(self, x: float, y: float) -> int:
        """Return the id of the vertex at (x, y), reusing any vertex within tolerance."""
        for existing_vid, (ex, ey) in self.vertices.items():
            if abs(ex - x) < VERTEX_TOL and abs(ey - y) < VERTEX_TOL:
                return existing_vid
        new_vid = self._next_vertex_id
        self._next_vertex_id += 1
        self.vertices[new_vid] = (float(x), float(y))
        self.vertex_edges[new_vid] = []
        return new_vid

    def add_edge(self, v1_id: int, v2_id: int, assignment: str) -> int:
        """Add an edge; an existing edge on the same vertex pair (either
        orientation) is reused as-is, keeping its original assignment."""
        wanted = frozenset((v1_id, v2_id))
        for existing_eid, (end_a, end_b, _) in self.edges.items():
            if frozenset((end_a, end_b)) == wanted:
                return existing_eid
        new_eid = self._next_edge_id
        self._next_edge_id += 1
        self.edges[new_eid] = (v1_id, v2_id, assignment)
        self.vertex_edges[v1_id].append(new_eid)
        self.vertex_edges[v2_id].append(new_eid)
        return new_eid

    def get_cyclic_edges(self, vertex_id: int) -> list[int]:
        """Edges incident to vertex_id sorted by angle (counterclockwise from -pi)."""
        cx, cy = self.vertices[vertex_id]

        def heading(eid: int) -> float:
            end_a, end_b, _ = self.edges[eid]
            far_vid = end_b if end_a == vertex_id else end_a
            fx, fy = self.vertices[far_vid]
            return float(np.arctan2(fy - cy, fx - cx))

        return sorted(self.vertex_edges[vertex_id], key=heading)

    def interior_vertices(self) -> list[int]:
        """Ids of vertices strictly inside the unit square."""
        return [
            vid
            for vid, (x, y) in self.vertices.items()
            if BOUNDARY_TOL < x < 1.0 - BOUNDARY_TOL
            and BOUNDARY_TOL < y < 1.0 - BOUNDARY_TOL
        ]

    def split_edge(self, edge_id: int, new_vertex_id: int) -> tuple[int, int]:
        """Replace edge_id with two edges meeting at new_vertex_id.

        Both halves keep the original assignment; returns their new ids.
        """
        end_a, end_b, assignment = self.edges.pop(edge_id)
        for endpoint in (end_a, end_b):
            if edge_id in self.vertex_edges[endpoint]:
                self.vertex_edges[endpoint].remove(edge_id)

        first_eid = self._next_edge_id
        self._next_edge_id += 1
        self.edges[first_eid] = (end_a, new_vertex_id, assignment)
        self.vertex_edges[end_a].append(first_eid)
        self.vertex_edges[new_vertex_id].append(first_eid)

        second_eid = self._next_edge_id
        self._next_edge_id += 1
        self.edges[second_eid] = (new_vertex_id, end_b, assignment)
        self.vertex_edges[new_vertex_id].append(second_eid)
        self.vertex_edges[end_b].append(second_eid)

        return (first_eid, second_eid)

    def crease_edges(self) -> list[int]:
        """Ids of edges carrying a fold assignment (M or V)."""
        return [eid for eid, (_, _, a) in self.edges.items() if a in ('M', 'V')]

    def boundary_midpoints(self) -> list[tuple[float, float]]:
        """Midpoints of all boundary (B) edges."""
        midpoints = []
        for _, (end_a, end_b, assignment) in self.edges.items():
            if assignment != 'B':
                continue
            ax, ay = self.vertices[end_a]
            bx, by = self.vertices[end_b]
            midpoints.append(((ax + bx) / 2.0, (ay + by) / 2.0))
        return midpoints
|
env/paper_state.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from shapely.geometry import LineString, Point, Polygon
|
| 3 |
+
from shapely.ops import unary_union
|
| 4 |
+
from typing import Optional
|
| 5 |
+
from .graph import CreaseGraph, VERTEX_TOL
|
| 6 |
+
|
| 7 |
+
UNIT_SQUARE_CORNERS = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
|
| 8 |
+
|
| 9 |
+
_UNIT_SQUARE = Polygon(UNIT_SQUARE_CORNERS)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class PaperState:
    """
    Represents the evolving crease pattern on a unit square [0,1]x[0,1].
    Uses CreaseGraph for the underlying data structure.
    """

    def __init__(self):
        self.graph = CreaseGraph()
        # Chronological record of every accepted fold (see add_crease).
        self.fold_history: list[dict] = []

    def anchor_points(self) -> list[tuple[float, float]]:
        """Return all referenceable points: square corners plus every graph vertex.

        Order-preserving and deduplicated via dict keys.
        """
        points: dict[tuple[float, float], None] = {}
        for corner in UNIT_SQUARE_CORNERS:
            points[corner] = None
        for vid, (x, y) in self.graph.vertices.items():
            points[(float(x), float(y))] = None
        return list(points.keys())

    def _is_anchor(self, pt: tuple[float, float]) -> bool:
        """True if pt coincides (within VERTEX_TOL) with an anchor point."""
        px, py = pt
        for ax, ay in self.anchor_points():
            if abs(ax - px) < VERTEX_TOL and abs(ay - py) < VERTEX_TOL:
                return True
        return False

    def add_crease(self, p1: list, p2: list, assignment: str) -> dict:
        """Add a straight crease from p1 to p2 with assignment 'M' or 'V'.

        Splits any existing edges crossed by the new crease and subdivides
        the new crease at those crossings. Returns a result dict with keys
        'valid', 'anchored', 'new_vertices', and 'errors'.

        BUGFIX: edge splitting is now performed inside the per-intersection
        loop. The previous version ran the splitting loop once after the
        loop, so it only ever used the LAST intersection point's (ix, iy)
        and vid — earlier crossings were never split, and a crease with no
        collected crossings raised NameError on the undefined loop variables.
        """
        errors: list[str] = []

        if assignment not in ('M', 'V'):
            return {
                'valid': False,
                'anchored': False,
                'new_vertices': [],
                'errors': ['invalid_assignment'],
            }

        p1 = (float(p1[0]), float(p1[1]))
        p2 = (float(p2[0]), float(p2[1]))

        # A fold is "anchored" when both endpoints hit existing anchor points.
        anchored = self._is_anchor(p1) and self._is_anchor(p2)

        seg_len = np.hypot(p2[0] - p1[0], p2[1] - p1[1])
        if seg_len < VERTEX_TOL:
            errors.append('zero_length')
            return {'valid': False, 'anchored': anchored, 'new_vertices': [], 'errors': errors}

        new_line = LineString([p1, p2])

        # Reject segments that lie entirely outside the unit square.
        if not _UNIT_SQUARE.contains(new_line) and not _UNIT_SQUARE.boundary.contains(new_line):
            clipped = new_line.intersection(_UNIT_SQUARE)
            if clipped.is_empty:
                errors.append('outside_bounds')
                return {'valid': False, 'anchored': anchored, 'new_vertices': [], 'errors': errors}

        # Collect transversal crossings with existing edges (endpoint
        # touches and collinear overlaps are skipped).
        intersection_points: list[tuple[float, float]] = []

        for eid, (ev1, ev2, _) in list(self.graph.edges.items()):
            ex1, ey1 = self.graph.vertices[ev1]
            ex2, ey2 = self.graph.vertices[ev2]
            existing_line = LineString([(ex1, ey1), (ex2, ey2)])
            inter = new_line.intersection(existing_line)

            if inter.is_empty:
                continue

            if inter.geom_type == 'Point':
                ix, iy = inter.x, inter.y
                ep1 = (ex1, ey1)
                ep2 = (ex2, ey2)
                if (
                    abs(ix - ep1[0]) < VERTEX_TOL and abs(iy - ep1[1]) < VERTEX_TOL
                    or abs(ix - ep2[0]) < VERTEX_TOL and abs(iy - ep2[1]) < VERTEX_TOL
                ):
                    continue
                intersection_points.append((ix, iy))
            # MultiPoint or LineString intersections (collinear) are skipped

        # For EACH intersection point: register a vertex and split every
        # edge passing through it. (Must be nested per point — see BUGFIX
        # note in the docstring.)
        new_vertex_coords: list[tuple[float, float]] = []
        for ix, iy in intersection_points:
            before = set(self.graph.vertices.keys())
            vid = self.graph.add_vertex(ix, iy)
            if vid not in before:
                new_vertex_coords.append((ix, iy))

            pt = Point(ix, iy)
            for eid in list(self.graph.edges.keys()):
                if eid not in self.graph.edges:
                    # Removed by a split earlier in this snapshot iteration.
                    continue
                ev1, ev2, _ = self.graph.edges[eid]
                ex1, ey1 = self.graph.vertices[ev1]
                ex2, ey2 = self.graph.vertices[ev2]
                seg = LineString([(ex1, ey1), (ex2, ey2)])
                if seg.distance(pt) < VERTEX_TOL and ev1 != vid and ev2 != vid:
                    self.graph.split_edge(eid, vid)

        # Walk the crease from p1 to p2 through its crossings, creating
        # vertices and sub-edges. (Endpoints are registered by the
        # waypoint loop itself; intersection vertices already exist.)
        waypoints = [p1] + sorted(
            intersection_points,
            key=lambda pt: np.hypot(pt[0] - p1[0], pt[1] - p1[1]),
        ) + [p2]

        waypoint_ids = []
        for wp in waypoints:
            wid = self.graph.add_vertex(wp[0], wp[1])
            waypoint_ids.append(wid)

        for i in range(len(waypoint_ids) - 1):
            wa = waypoint_ids[i]
            wb = waypoint_ids[i + 1]
            if wa != wb:
                self.graph.add_edge(wa, wb, assignment)

        record = {
            'p1': p1,
            'p2': p2,
            'assignment': assignment,
            'anchored': anchored,
            'new_vertices': new_vertex_coords,
        }
        self.fold_history.append(record)

        return {
            'valid': True,
            'anchored': anchored,
            'new_vertices': new_vertex_coords,
            'errors': errors,
        }

    def crease_edges(self) -> list[dict]:
        """Return M/V crease edges as dicts with endpoint coordinates."""
        result = []
        for eid in self.graph.crease_edges():
            v1, v2, assignment = self.graph.edges[eid]
            x1, y1 = self.graph.vertices[v1]
            x2, y2 = self.graph.vertices[v2]
            result.append({'v1': (x1, y1), 'v2': (x2, y2), 'assignment': assignment})
        return result
|
env/prompts.py
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import re
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
_CORNERS = {(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)}
|
| 6 |
+
_BOUNDARY_X = {0.0, 1.0}
|
| 7 |
+
_BOUNDARY_Y = {0.0, 1.0}
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def _is_corner(x: float, y: float) -> bool:
|
| 11 |
+
return (round(x, 4), round(y, 4)) in _CORNERS
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _is_boundary(x: float, y: float) -> bool:
|
| 15 |
+
return x in _BOUNDARY_X or y in _BOUNDARY_Y
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def format_target_for_prompt(target: dict) -> str:
    """Render a FOLD target's crease edges (M/V only) as human-readable lines.

    Boundary ('B') and other non-crease edges are skipped; coordinates are
    rounded to 4 decimal places for compact prompt text.
    """
    coords = target["vertices_coords"]
    described = []
    for (a, b), kind in zip(target["edges_vertices"], target["edges_assignment"]):
        if kind == "M":
            name = "Mountain"
        elif kind == "V":
            name = "Valley"
        else:
            continue
        ax, ay = coords[a]
        bx, by = coords[b]
        described.append(
            f"{name} fold: ({round(ax, 4)}, {round(ay, 4)}) -> ({round(bx, 4)}, {round(by, 4)})"
        )
    return "\n".join(described)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def format_anchor_points(paper_state) -> str:
    """Render the paper's current anchor points, grouped by kind.

    Points are rounded to 4 decimal places, then classified as square
    corners, other boundary points, or interior crease intersections.

    Args:
        paper_state: object exposing anchor_points() -> iterable of (x, y).

    Returns:
        One line per non-empty group; empty string when there are no anchors.
    """
    corners = []
    boundary_pts = []
    intersections = []

    # Classify after rounding so the grouping matches the printed values.
    for x, y in paper_state.anchor_points():
        rx, ry = round(x, 4), round(y, 4)
        if _is_corner(rx, ry):
            corners.append((rx, ry))
        elif _is_boundary(rx, ry):
            boundary_pts.append((rx, ry))
        else:
            intersections.append((rx, ry))

    def fmt_pts(pts: list[tuple[float, float]]) -> str:
        # "(x,y) (x,y) ..." -- no space inside a pair, one space between pairs.
        return " ".join(f"({x},{y})" for x, y in pts)

    lines = []
    if corners:
        lines.append(f" Corners: {fmt_pts(corners)}")
    if boundary_pts:
        lines.append(f" Boundary pts: {fmt_pts(boundary_pts)}")
    if intersections:
        lines.append(f" Intersections: {fmt_pts(intersections)}")

    return "\n".join(lines)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def format_crease_history(paper_state) -> str:
    """Render the fold history as a numbered list, or 'none' when empty."""
    folds = paper_state.fold_history
    if not folds:
        return "none"

    rendered = []
    for idx, entry in enumerate(folds, start=1):
        kind = "Mountain" if entry["assignment"] == "M" else "Valley"
        ax, ay = round(entry["p1"][0], 4), round(entry["p1"][1], 4)
        bx, by = round(entry["p2"][0], 4), round(entry["p2"][1], 4)
        rendered.append(f" {idx}. {kind} fold: ({ax}, {ay}) -> ({bx}, {by})")

    return "\n".join(rendered)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def format_reward_feedback(reward: Optional[dict]) -> str:
    """Render a reward dict as space-separated 'key=value' pairs.

    Known reward components are listed first in a fixed order; any other
    numeric entries follow in dict order. Returns a placeholder when no
    reward is available yet.
    """
    if not reward:
        return "(no feedback yet)"

    ordered = ["kawasaki", "maekawa", "blb", "progress", "economy", "total"]
    pieces = [f"{name}={reward[name]:.2f}" for name in ordered if name in reward]
    pieces += [
        f"{name}={value:.2f}"
        for name, value in reward.items()
        if name not in ordered
    ]
    return " " + " ".join(pieces)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def code_as_policy_prompt(target: dict, max_folds: int = 8) -> str:
    """Build the single-shot "emit the whole fold sequence" prompt.

    The model is asked to output up to *max_folds* folds as a JSON list
    wrapped in <folds> tags; parse_fold_list() is the matching parser.

    Args:
        target: FOLD target dict (vertices_coords / edges_vertices / edges_assignment).
        max_folds: cap on the number of folds the model may emit.

    Returns:
        The full prompt string.
    """
    formatted_target = format_target_for_prompt(target)
    return f"""You are an origami designer. Generate a fold sequence for a unit square [0,1]x[0,1].

TARGET CREASE PATTERN:
{formatted_target}

RULES (must hold at every interior vertex):
- Kawasaki: alternating sector angles sum equally (each half = 180 degrees)
- Maekawa: |mountain_count - valley_count| = 2
- Big-Little-Big: folds bounding the smallest sector must have opposite types (one M, one V)

INITIAL ANCHOR POINTS (valid fold endpoints β new ones appear when creases intersect):
Corners: (0.0,0.0) (1.0,0.0) (1.0,1.0) (0.0,1.0)
Midpoints: (0.0,0.5) (0.5,0.0) (1.0,0.5) (0.5,1.0)
Note: new anchor points are created at crease intersections.

Output at most {max_folds} folds. Both endpoints must be valid anchor points.
Output ONLY the JSON list, wrapped in <folds> tags:

<folds>
[
{{"instruction": "Describe the fold in plain English", "from": [x1, y1], "to": [x2, y2], "assignment": "V"}},
{{"instruction": "...", "from": [x1, y1], "to": [x2, y2], "assignment": "M"}}
]
</folds>"""
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def step_level_prompt(
    target: dict,
    paper_state,
    step: int,
    max_steps: int,
    last_reward: Optional[dict] = None,
) -> str:
    """Build the iterative "add one crease" prompt for a single env step.

    Embeds the target pattern, the crease history so far, the currently
    valid anchor points, and the previous step's reward feedback; the model
    must answer with one JSON fold object (see parse_single_fold()).

    Args:
        target: FOLD target dict.
        paper_state: current paper state (fold_history / anchor_points()).
        step: 1-based index of the current step.
        max_steps: episode step budget, shown to the model.
        last_reward: reward dict from the previous action, or None on step 1.

    Returns:
        The full prompt string.
    """
    formatted_target = format_target_for_prompt(target)
    formatted_history = format_crease_history(paper_state)
    formatted_anchors = format_anchor_points(paper_state)
    formatted_reward = format_reward_feedback(last_reward)

    return f"""You are an origami designer building a crease pattern step by step.

TARGET:
{formatted_target}

CURRENT STATE (step {step} of {max_steps}):
Creases placed:
{formatted_history}

AVAILABLE ANCHOR POINTS:
{formatted_anchors}

LAST REWARD:
{formatted_reward}

Add the NEXT crease. Both endpoints must be listed anchor points above.
Output ONLY valid JSON (no extra text):
{{"instruction": "...", "from": [x1, y1], "to": [x2, y2], "assignment": "M" or "V"}}"""
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def parse_fold_list(completion: str) -> list[dict]:
    """Extract and validate the JSON fold list from a <folds>-tagged completion.

    Each fold is normalized to {'from': [x, y], 'to': [x, y],
    'assignment': str, 'instruction': str} with float endpoints.

    Raises:
        ValueError: when the tags are missing, the JSON is malformed, or any
            fold has the wrong shape.
    """
    tag_match = re.search(r"<folds>(.*?)</folds>", completion, re.IGNORECASE | re.DOTALL)
    if tag_match is None:
        raise ValueError("No <folds>...</folds> tags found in completion")

    try:
        parsed = json.loads(tag_match.group(1).strip())
    except json.JSONDecodeError as e:
        raise ValueError(f"Failed to parse JSON inside <folds> tags: {e}") from e

    if not isinstance(parsed, list):
        raise ValueError(f"Expected a JSON list inside <folds> tags, got {type(parsed).__name__}")

    def _check_pair(value, index: int, field: str) -> None:
        # Endpoints must be exactly two JSON numbers.
        is_pair = (
            isinstance(value, list)
            and len(value) == 2
            and all(isinstance(v, (int, float)) for v in value)
        )
        if not is_pair:
            raise ValueError(f"Fold {index} '{field}' must be a list of 2 numbers, got {value!r}")

    normalized = []
    for idx, fold in enumerate(parsed):
        if not isinstance(fold, dict):
            raise ValueError(f"Fold {idx} is not a dict: {fold!r}")

        for required in ("from", "to", "assignment"):
            if required not in fold:
                raise ValueError(f"Fold {idx} missing required field '{required}'")

        _check_pair(fold["from"], idx, "from")
        _check_pair(fold["to"], idx, "to")

        if not isinstance(fold["assignment"], str):
            raise ValueError(f"Fold {idx} 'assignment' must be a string")

        normalized.append({
            "from": [float(fold["from"][0]), float(fold["from"][1])],
            "to": [float(fold["to"][0]), float(fold["to"][1])],
            "assignment": fold["assignment"],
            "instruction": fold.get("instruction", ""),
        })

    return normalized
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def parse_single_fold(completion: str) -> dict:
    """Extract and validate a single fold JSON object from a completion.

    Takes the outermost {...} span, parses it, and enforces the same shape
    that parse_fold_list() enforces per fold: 'from'/'to' are 2-element
    numeric lists (coerced to float in place) and 'assignment' is a string.
    A missing 'instruction' is defaulted to "" for consistency with
    parse_fold_list(); any extra keys the model emitted are kept.

    Raises:
        ValueError: if no JSON object is found, the JSON is malformed, or
            any required field is missing or has the wrong shape.
    """
    start = completion.find("{")
    end = completion.rfind("}")

    if start == -1 or end == -1 or end <= start:
        raise ValueError("No JSON object found in completion")

    raw = completion[start : end + 1]

    try:
        data = json.loads(raw)
    except json.JSONDecodeError as e:
        raise ValueError(f"Failed to parse JSON from completion: {e}") from e

    if not isinstance(data, dict):
        raise ValueError(f"Expected a JSON object, got {type(data).__name__}")

    for field in ("from", "to", "assignment"):
        if field not in data:
            raise ValueError(f"Missing required field '{field}' in fold JSON")

    # Consistency with parse_fold_list: validate endpoints and coerce to float.
    for field in ("from", "to"):
        pt = data[field]
        if (
            not isinstance(pt, list)
            or len(pt) != 2
            or not all(isinstance(v, (int, float)) for v in pt)
        ):
            raise ValueError(f"Fold '{field}' must be a list of 2 numbers, got {pt!r}")
        data[field] = [float(pt[0]), float(pt[1])]

    if not isinstance(data["assignment"], str):
        raise ValueError("Fold 'assignment' must be a string")

    data.setdefault("instruction", "")
    return data
|
env/rewards.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from .verifier import check_all_vertices, geometric_crease_coverage
|
| 3 |
+
from .paper_state import PaperState
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def load_target(target_path: str) -> dict:
    """Load a .fold target file and return it as a dict."""
    with open(target_path) as handle:
        data = json.load(handle)
    return data
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def target_crease_edges(target: dict) -> list[dict]:
    """
    Extract crease edges from a FOLD target dict as list of
    {'v1': (x1,y1), 'v2': (x2,y2), 'assignment': 'M'|'V'} dicts.

    Boundary ('B') and other non-crease assignments are skipped.
    """
    coords = target['vertices_coords']
    creases = []
    for (a, b), kind in zip(target['edges_vertices'], target['edges_assignment']):
        if kind not in ('M', 'V'):
            continue
        creases.append({
            'v1': tuple(coords[a]),
            'v2': tuple(coords[b]),
            'assignment': kind,
        })
    return creases
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def compute_reward(
    state: PaperState,
    action_result: dict,
    target: dict,
) -> dict:
    """
    Compute the full reward dict for a fold action.

    Args:
        state: current PaperState AFTER the action was applied
        action_result: {'valid': bool, 'anchored': bool, 'new_vertices': list, 'errors': list}
        target: FOLD target dict

    Returns dict with keys:
        format, anchored, kawasaki, maekawa, blb, progress, economy, completion, efficiency, total
        (on a format failure only 'format' and 'total' are present)
    """
    r = {}

    # Gate 1: format -- did the action parse and apply?
    r['format'] = 1.0 if action_result.get('valid', False) else 0.0
    if not r['format']:
        # Invalid action short-circuits with a small fixed penalty;
        # no other components are computed.
        r['total'] = -0.1
        return r

    # Gate 2: anchoring -- were endpoints valid anchor points?
    # Un-anchored folds still earn partial credit (0.3) rather than zero.
    r['anchored'] = 1.0 if action_result.get('anchored', False) else 0.3

    # Vertex-level validity checks (all interior vertices)
    vertex_scores = check_all_vertices(state.graph)
    r['kawasaki'] = vertex_scores['kawasaki']
    r['maekawa'] = vertex_scores['maekawa']
    r['blb'] = vertex_scores['blb']

    # Geometric progress toward the target crease pattern
    t_edges = target_crease_edges(target)
    coverage, economy = geometric_crease_coverage(state, t_edges)
    r['progress'] = coverage
    r['economy'] = economy

    # Completion bonus: high coverage + all vertex conditions satisfied
    all_valid = (r['kawasaki'] == 1.0 and r['maekawa'] == 1.0 and r['blb'] == 1.0)
    r['completion'] = 10.0 if (r['progress'] > 0.9 and all_valid) else 0.0

    # Step cost: small per-action penalty to encourage short sequences
    r['efficiency'] = -0.01

    # Weighted total; completion and efficiency are added unweighted so the
    # completion bonus dominates when earned.
    r['total'] = (
        0.05 * r['anchored'] +
        0.08 * r['kawasaki'] +
        0.07 * r['maekawa'] +
        0.05 * r['blb'] +
        0.45 * r['progress'] +
        0.10 * r['economy'] +
        r['completion'] +
        r['efficiency']
    )
    return r
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def compute_terminal_reward(state: PaperState, target: dict) -> dict:
    """Compute reward for the final state after a complete fold sequence.

    Wraps compute_reward with a synthetic "valid, anchored" action result so
    only the state-dependent components contribute.
    """
    synthetic_result = {'valid': True, 'anchored': True, 'new_vertices': [], 'errors': []}
    return compute_reward(state, synthetic_result, target)
|
env/targets/__init__.py
ADDED
|
File without changes
|
env/targets/accordion_3h.fold
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0],
|
| 7 |
+
[0.0, 0.25],
|
| 8 |
+
[1.0, 0.25],
|
| 9 |
+
[0.0, 0.5],
|
| 10 |
+
[1.0, 0.5],
|
| 11 |
+
[0.0, 0.75],
|
| 12 |
+
[1.0, 0.75]
|
| 13 |
+
],
|
| 14 |
+
"edges_vertices": [
|
| 15 |
+
[0, 1],
|
| 16 |
+
[1, 5],
|
| 17 |
+
[5, 7],
|
| 18 |
+
[7, 9],
|
| 19 |
+
[9, 2],
|
| 20 |
+
[2, 3],
|
| 21 |
+
[3, 8],
|
| 22 |
+
[8, 6],
|
| 23 |
+
[6, 4],
|
| 24 |
+
[4, 0],
|
| 25 |
+
[4, 5],
|
| 26 |
+
[6, 7],
|
| 27 |
+
[8, 9]
|
| 28 |
+
],
|
| 29 |
+
"edges_assignment": [
|
| 30 |
+
"B",
|
| 31 |
+
"B",
|
| 32 |
+
"B",
|
| 33 |
+
"B",
|
| 34 |
+
"B",
|
| 35 |
+
"B",
|
| 36 |
+
"B",
|
| 37 |
+
"B",
|
| 38 |
+
"B",
|
| 39 |
+
"B",
|
| 40 |
+
"V",
|
| 41 |
+
"M",
|
| 42 |
+
"V"
|
| 43 |
+
],
|
| 44 |
+
"edges_foldAngle": [
|
| 45 |
+
0,
|
| 46 |
+
0,
|
| 47 |
+
0,
|
| 48 |
+
0,
|
| 49 |
+
0,
|
| 50 |
+
0,
|
| 51 |
+
0,
|
| 52 |
+
0,
|
| 53 |
+
0,
|
| 54 |
+
0,
|
| 55 |
+
-180,
|
| 56 |
+
-180,
|
| 57 |
+
-180
|
| 58 |
+
],
|
| 59 |
+
"faces_vertices": [
|
| 60 |
+
[0, 1, 5, 4],
|
| 61 |
+
[4, 5, 7, 6],
|
| 62 |
+
[6, 7, 9, 8],
|
| 63 |
+
[8, 9, 2, 3]
|
| 64 |
+
],
|
| 65 |
+
"level": 3,
|
| 66 |
+
"description": "Three alternating horizontal folds at y=0.25 (valley), y=0.5 (mountain), y=0.75 (valley) forming an accordion"
|
| 67 |
+
}
|
env/targets/accordion_4h.fold
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0],
|
| 7 |
+
[0.0, 0.2],
|
| 8 |
+
[1.0, 0.2],
|
| 9 |
+
[0.0, 0.4],
|
| 10 |
+
[1.0, 0.4],
|
| 11 |
+
[0.0, 0.6],
|
| 12 |
+
[1.0, 0.6],
|
| 13 |
+
[0.0, 0.8],
|
| 14 |
+
[1.0, 0.8]
|
| 15 |
+
],
|
| 16 |
+
"edges_vertices": [
|
| 17 |
+
[0, 1],
|
| 18 |
+
[1, 5],
|
| 19 |
+
[5, 7],
|
| 20 |
+
[7, 9],
|
| 21 |
+
[9, 11],
|
| 22 |
+
[11, 2],
|
| 23 |
+
[2, 3],
|
| 24 |
+
[3, 10],
|
| 25 |
+
[10, 8],
|
| 26 |
+
[8, 6],
|
| 27 |
+
[6, 4],
|
| 28 |
+
[4, 0],
|
| 29 |
+
[4, 5],
|
| 30 |
+
[6, 7],
|
| 31 |
+
[8, 9],
|
| 32 |
+
[10, 11]
|
| 33 |
+
],
|
| 34 |
+
"edges_assignment": [
|
| 35 |
+
"B",
|
| 36 |
+
"B",
|
| 37 |
+
"B",
|
| 38 |
+
"B",
|
| 39 |
+
"B",
|
| 40 |
+
"B",
|
| 41 |
+
"B",
|
| 42 |
+
"B",
|
| 43 |
+
"B",
|
| 44 |
+
"B",
|
| 45 |
+
"B",
|
| 46 |
+
"B",
|
| 47 |
+
"V",
|
| 48 |
+
"M",
|
| 49 |
+
"V",
|
| 50 |
+
"M"
|
| 51 |
+
],
|
| 52 |
+
"edges_foldAngle": [
|
| 53 |
+
0,
|
| 54 |
+
0,
|
| 55 |
+
0,
|
| 56 |
+
0,
|
| 57 |
+
0,
|
| 58 |
+
0,
|
| 59 |
+
0,
|
| 60 |
+
0,
|
| 61 |
+
0,
|
| 62 |
+
0,
|
| 63 |
+
0,
|
| 64 |
+
0,
|
| 65 |
+
-180,
|
| 66 |
+
-180,
|
| 67 |
+
-180,
|
| 68 |
+
-180
|
| 69 |
+
],
|
| 70 |
+
"faces_vertices": [
|
| 71 |
+
[0, 1, 5, 4],
|
| 72 |
+
[4, 5, 7, 6],
|
| 73 |
+
[6, 7, 9, 8],
|
| 74 |
+
[8, 9, 11, 10],
|
| 75 |
+
[10, 11, 2, 3]
|
| 76 |
+
],
|
| 77 |
+
"level": 3,
|
| 78 |
+
"description": "Four alternating horizontal folds at y=0.2 (valley), y=0.4 (mountain), y=0.6 (valley), y=0.8 (mountain) forming an accordion"
|
| 79 |
+
}
|
env/targets/diagonal_anti.fold
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0]
|
| 7 |
+
],
|
| 8 |
+
"edges_vertices": [
|
| 9 |
+
[0, 1],
|
| 10 |
+
[1, 2],
|
| 11 |
+
[2, 3],
|
| 12 |
+
[3, 0],
|
| 13 |
+
[1, 3]
|
| 14 |
+
],
|
| 15 |
+
"edges_assignment": [
|
| 16 |
+
"B",
|
| 17 |
+
"B",
|
| 18 |
+
"B",
|
| 19 |
+
"B",
|
| 20 |
+
"M"
|
| 21 |
+
],
|
| 22 |
+
"edges_foldAngle": [
|
| 23 |
+
0,
|
| 24 |
+
0,
|
| 25 |
+
0,
|
| 26 |
+
0,
|
| 27 |
+
-180
|
| 28 |
+
],
|
| 29 |
+
"faces_vertices": [
|
| 30 |
+
[0, 1, 3],
|
| 31 |
+
[1, 2, 3]
|
| 32 |
+
],
|
| 33 |
+
"level": 1,
|
| 34 |
+
"description": "One mountain fold along the anti-diagonal from (1,0) to (0,1)"
|
| 35 |
+
}
|
env/targets/diagonal_main.fold
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0]
|
| 7 |
+
],
|
| 8 |
+
"edges_vertices": [
|
| 9 |
+
[0, 1],
|
| 10 |
+
[1, 2],
|
| 11 |
+
[2, 3],
|
| 12 |
+
[3, 0],
|
| 13 |
+
[0, 2]
|
| 14 |
+
],
|
| 15 |
+
"edges_assignment": [
|
| 16 |
+
"B",
|
| 17 |
+
"B",
|
| 18 |
+
"B",
|
| 19 |
+
"B",
|
| 20 |
+
"V"
|
| 21 |
+
],
|
| 22 |
+
"edges_foldAngle": [
|
| 23 |
+
0,
|
| 24 |
+
0,
|
| 25 |
+
0,
|
| 26 |
+
0,
|
| 27 |
+
-180
|
| 28 |
+
],
|
| 29 |
+
"faces_vertices": [
|
| 30 |
+
[0, 1, 2],
|
| 31 |
+
[0, 2, 3]
|
| 32 |
+
],
|
| 33 |
+
"level": 1,
|
| 34 |
+
"description": "One valley fold along the main diagonal from (0,0) to (1,1)"
|
| 35 |
+
}
|
env/targets/half_horizontal.fold
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0],
|
| 7 |
+
[0.0, 0.5],
|
| 8 |
+
[1.0, 0.5]
|
| 9 |
+
],
|
| 10 |
+
"edges_vertices": [
|
| 11 |
+
[0, 1],
|
| 12 |
+
[1, 5],
|
| 13 |
+
[5, 2],
|
| 14 |
+
[2, 3],
|
| 15 |
+
[3, 4],
|
| 16 |
+
[4, 0],
|
| 17 |
+
[4, 5]
|
| 18 |
+
],
|
| 19 |
+
"edges_assignment": [
|
| 20 |
+
"B",
|
| 21 |
+
"B",
|
| 22 |
+
"B",
|
| 23 |
+
"B",
|
| 24 |
+
"B",
|
| 25 |
+
"B",
|
| 26 |
+
"V"
|
| 27 |
+
],
|
| 28 |
+
"edges_foldAngle": [
|
| 29 |
+
0,
|
| 30 |
+
0,
|
| 31 |
+
0,
|
| 32 |
+
0,
|
| 33 |
+
0,
|
| 34 |
+
0,
|
| 35 |
+
-180
|
| 36 |
+
],
|
| 37 |
+
"faces_vertices": [
|
| 38 |
+
[0, 1, 5, 4],
|
| 39 |
+
[4, 5, 2, 3]
|
| 40 |
+
],
|
| 41 |
+
"level": 1,
|
| 42 |
+
"description": "One valley fold along y=0.5, folding the paper in half horizontally"
|
| 43 |
+
}
|
env/targets/half_vertical.fold
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0],
|
| 7 |
+
[0.5, 0.0],
|
| 8 |
+
[0.5, 1.0]
|
| 9 |
+
],
|
| 10 |
+
"edges_vertices": [
|
| 11 |
+
[0, 4],
|
| 12 |
+
[4, 1],
|
| 13 |
+
[1, 2],
|
| 14 |
+
[2, 5],
|
| 15 |
+
[5, 3],
|
| 16 |
+
[3, 0],
|
| 17 |
+
[4, 5]
|
| 18 |
+
],
|
| 19 |
+
"edges_assignment": [
|
| 20 |
+
"B",
|
| 21 |
+
"B",
|
| 22 |
+
"B",
|
| 23 |
+
"B",
|
| 24 |
+
"B",
|
| 25 |
+
"B",
|
| 26 |
+
"M"
|
| 27 |
+
],
|
| 28 |
+
"edges_foldAngle": [
|
| 29 |
+
0,
|
| 30 |
+
0,
|
| 31 |
+
0,
|
| 32 |
+
0,
|
| 33 |
+
0,
|
| 34 |
+
0,
|
| 35 |
+
-180
|
| 36 |
+
],
|
| 37 |
+
"faces_vertices": [
|
| 38 |
+
[0, 4, 5, 3],
|
| 39 |
+
[4, 1, 2, 5]
|
| 40 |
+
],
|
| 41 |
+
"level": 1,
|
| 42 |
+
"description": "One mountain fold along x=0.5, folding the paper in half vertically"
|
| 43 |
+
}
|
env/targets/thirds_h.fold
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0],
|
| 7 |
+
[0.0, 0.3333333333333333],
|
| 8 |
+
[1.0, 0.3333333333333333],
|
| 9 |
+
[0.0, 0.6666666666666666],
|
| 10 |
+
[1.0, 0.6666666666666666]
|
| 11 |
+
],
|
| 12 |
+
"edges_vertices": [
|
| 13 |
+
[0, 1],
|
| 14 |
+
[1, 5],
|
| 15 |
+
[5, 7],
|
| 16 |
+
[7, 2],
|
| 17 |
+
[2, 3],
|
| 18 |
+
[3, 6],
|
| 19 |
+
[6, 4],
|
| 20 |
+
[4, 0],
|
| 21 |
+
[4, 5],
|
| 22 |
+
[6, 7]
|
| 23 |
+
],
|
| 24 |
+
"edges_assignment": [
|
| 25 |
+
"B",
|
| 26 |
+
"B",
|
| 27 |
+
"B",
|
| 28 |
+
"B",
|
| 29 |
+
"B",
|
| 30 |
+
"B",
|
| 31 |
+
"B",
|
| 32 |
+
"B",
|
| 33 |
+
"V",
|
| 34 |
+
"V"
|
| 35 |
+
],
|
| 36 |
+
"edges_foldAngle": [
|
| 37 |
+
0,
|
| 38 |
+
0,
|
| 39 |
+
0,
|
| 40 |
+
0,
|
| 41 |
+
0,
|
| 42 |
+
0,
|
| 43 |
+
0,
|
| 44 |
+
0,
|
| 45 |
+
-180,
|
| 46 |
+
-180
|
| 47 |
+
],
|
| 48 |
+
"faces_vertices": [
|
| 49 |
+
[0, 1, 5, 4],
|
| 50 |
+
[4, 5, 7, 6],
|
| 51 |
+
[6, 7, 2, 3]
|
| 52 |
+
],
|
| 53 |
+
"level": 2,
|
| 54 |
+
"description": "Two parallel valley folds at y=1/3 and y=2/3, dividing the paper into horizontal thirds"
|
| 55 |
+
}
|
env/targets/thirds_v.fold
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vertices_coords": [
|
| 3 |
+
[0.0, 0.0],
|
| 4 |
+
[1.0, 0.0],
|
| 5 |
+
[1.0, 1.0],
|
| 6 |
+
[0.0, 1.0],
|
| 7 |
+
[0.3333333333333333, 0.0],
|
| 8 |
+
[0.6666666666666666, 0.0],
|
| 9 |
+
[0.3333333333333333, 1.0],
|
| 10 |
+
[0.6666666666666666, 1.0]
|
| 11 |
+
],
|
| 12 |
+
"edges_vertices": [
|
| 13 |
+
[0, 4],
|
| 14 |
+
[4, 5],
|
| 15 |
+
[5, 1],
|
| 16 |
+
[1, 2],
|
| 17 |
+
[2, 7],
|
| 18 |
+
[7, 6],
|
| 19 |
+
[6, 3],
|
| 20 |
+
[3, 0],
|
| 21 |
+
[4, 6],
|
| 22 |
+
[5, 7]
|
| 23 |
+
],
|
| 24 |
+
"edges_assignment": [
|
| 25 |
+
"B",
|
| 26 |
+
"B",
|
| 27 |
+
"B",
|
| 28 |
+
"B",
|
| 29 |
+
"B",
|
| 30 |
+
"B",
|
| 31 |
+
"B",
|
| 32 |
+
"B",
|
| 33 |
+
"M",
|
| 34 |
+
"M"
|
| 35 |
+
],
|
| 36 |
+
"edges_foldAngle": [
|
| 37 |
+
0,
|
| 38 |
+
0,
|
| 39 |
+
0,
|
| 40 |
+
0,
|
| 41 |
+
0,
|
| 42 |
+
0,
|
| 43 |
+
0,
|
| 44 |
+
0,
|
| 45 |
+
-180,
|
| 46 |
+
-180
|
| 47 |
+
],
|
| 48 |
+
"faces_vertices": [
|
| 49 |
+
[0, 4, 6, 3],
|
| 50 |
+
[4, 5, 7, 6],
|
| 51 |
+
[5, 1, 2, 7]
|
| 52 |
+
],
|
| 53 |
+
"level": 2,
|
| 54 |
+
"description": "Two parallel mountain folds at x=1/3 and x=2/3, dividing the paper into vertical thirds"
|
| 55 |
+
}
|
env/targets/validator.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Validates all .fold target files against origami theorems.
|
| 3 |
+
Run directly: python -m env.targets.validator
|
| 4 |
+
"""
|
| 5 |
+
import json
|
| 6 |
+
import os
|
| 7 |
+
import sys
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
|
| 10 |
+
from ..graph import CreaseGraph
|
| 11 |
+
from ..verifier import check_kawasaki_at_vertex, check_maekawa_at_vertex, check_blb_at_vertex
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def build_graph_from_fold(fold_data: dict) -> CreaseGraph:
    """
    Reconstruct a CreaseGraph from a FOLD JSON dict.
    Used to validate target files.
    """
    graph = CreaseGraph()

    # FOLD-file vertex index -> graph vertex ID
    id_of = {
        idx: graph.add_vertex(float(x), float(y))
        for idx, (x, y) in enumerate(fold_data['vertices_coords'])
    }

    # add_edge deduplicates, so boundary edges created at init are safe to re-add.
    assignments = fold_data['edges_assignment']
    for i, (a, b) in enumerate(fold_data['edges_vertices']):
        graph.add_edge(id_of[a], id_of[b], assignments[i])

    return graph
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def validate_target(fold_path: str) -> dict:
    """
    Validate a single .fold target file.

    Checks required FOLD fields, per-edge array lengths, and the three
    flat-foldability theorems (Kawasaki, Maekawa, Big-Little-Big) at every
    interior vertex of the reconstructed crease graph.

    Returns {'file': str, 'valid': bool, 'issues': list[str], 'interior_vertices': int}
    (interior_vertices is -1 when required fields are missing and no graph
    could be built).
    """
    with open(fold_path) as f:
        fold_data = json.load(f)

    issues = []

    # Basic structure checks: bail out before graph construction if any
    # required field is absent.
    required = ['vertices_coords', 'edges_vertices', 'edges_assignment', 'edges_foldAngle']
    for field in required:
        if field not in fold_data:
            issues.append(f"Missing field: {field}")

    if issues:
        return {'file': os.path.basename(fold_path), 'valid': False, 'issues': issues, 'interior_vertices': -1}

    n_edges = len(fold_data['edges_vertices'])
    if len(fold_data['edges_assignment']) != n_edges:
        issues.append("edges_assignment length mismatch")
    if len(fold_data['edges_foldAngle']) != n_edges:
        issues.append("edges_foldAngle length mismatch")

    # Build graph and check theorems at each interior vertex.
    graph = build_graph_from_fold(fold_data)
    interior = graph.interior_vertices()

    for v_id in interior:
        ok, alt_sum = check_kawasaki_at_vertex(v_id, graph)
        if not ok:
            issues.append(f"Kawasaki violated at vertex {v_id} (alt_sum={alt_sum:.6f})")

        if not check_maekawa_at_vertex(v_id, graph):
            issues.append(f"Maekawa violated at vertex {v_id}")

        blb_violations = check_blb_at_vertex(v_id, graph)
        if blb_violations:
            issues.append(f"BLB violated at vertex {v_id}: {blb_violations}")

    return {
        'file': os.path.basename(fold_path),
        'valid': len(issues) == 0,
        'issues': issues,
        'interior_vertices': len(interior),
    }
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def validate_all(targets_dir: str = None) -> bool:
    """Validate all .fold files in the targets directory. Returns True if all pass.

    Args:
        targets_dir: directory to scan; defaults to this module's directory.

    Prints one status line per file plus any issues; returns False when no
    .fold files are found at all.
    """
    if targets_dir is None:
        # Default to the directory containing this validator module.
        targets_dir = Path(__file__).parent

    all_pass = True
    fold_files = sorted(Path(targets_dir).glob('*.fold'))

    if not fold_files:
        print("No .fold files found")
        return False

    for fold_path in fold_files:
        result = validate_target(str(fold_path))
        status = "OK" if result['valid'] else "FAIL"
        n_interior = result['interior_vertices']
        print(f" [{status}] {result['file']} β {n_interior} interior vertices")
        if result['issues']:
            for issue in result['issues']:
                print(f" ! {issue}")
        if not result['valid']:
            all_pass = False

    return all_pass
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
if __name__ == '__main__':
    # CLI entry point: exit nonzero when any target fails validation
    # so this can gate CI.
    print("Validating targets...")
    ok = validate_all()
    sys.exit(0 if ok else 1)
|
env/targets/validator_check.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
import os

# Sanity-check every .fold target file: parallel arrays must agree in length
# and all vertex references must be in range.
#
# Fix: the directory is now derived from this script's own location instead of
# a hard-coded absolute path (/Users/ianalin/...), which only worked on the
# original author's machine.
targets_dir = os.path.dirname(os.path.abspath(__file__))

for fname in sorted(os.listdir(targets_dir)):
    if not fname.endswith(".fold"):
        continue
    with open(os.path.join(targets_dir, fname)) as f:
        d = json.load(f)

    n_v = len(d["vertices_coords"])
    n_e = len(d["edges_vertices"])

    # Per-edge arrays must be parallel to edges_vertices.
    assert len(d["edges_assignment"]) == n_e, f"{fname}: assignment length mismatch"
    assert len(d["edges_foldAngle"]) == n_e, f"{fname}: foldAngle length mismatch"

    # Every edge and face must reference an existing vertex index.
    for e in d["edges_vertices"]:
        assert e[0] < n_v and e[1] < n_v, f"{fname}: edge references invalid vertex"
    for face in d["faces_vertices"]:
        for vi in face:
            assert vi < n_v, f"{fname}: face references invalid vertex"

    creases = [i for i, a in enumerate(d["edges_assignment"]) if a in ('M', 'V')]
    print(f"{fname}: {n_v} vertices, {n_e} edges, {len(creases)} creases, level={d.get('level', '?')} OK")
|
env/verifier.py
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from .graph import CreaseGraph
|
| 3 |
+
from .paper_state import PaperState
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def _compute_sector_angles(vertex_id: int, graph: CreaseGraph) -> list[float]:
|
| 7 |
+
"""Compute consecutive sector angles (CCW) at a vertex from its cyclic edges."""
|
| 8 |
+
cyclic_edges = graph.get_cyclic_edges(vertex_id)
|
| 9 |
+
n = len(cyclic_edges)
|
| 10 |
+
vx, vy = graph.vertices[vertex_id]
|
| 11 |
+
|
| 12 |
+
angles = []
|
| 13 |
+
for eid in cyclic_edges:
|
| 14 |
+
ev1, ev2, _ = graph.edges[eid]
|
| 15 |
+
other_id = ev2 if ev1 == vertex_id else ev1
|
| 16 |
+
ox, oy = graph.vertices[other_id]
|
| 17 |
+
angles.append(np.arctan2(oy - vy, ox - vx))
|
| 18 |
+
|
| 19 |
+
sectors = []
|
| 20 |
+
for i in range(n):
|
| 21 |
+
diff = angles[(i + 1) % n] - angles[i]
|
| 22 |
+
if diff < 0:
|
| 23 |
+
diff += 2 * np.pi
|
| 24 |
+
if diff > 2 * np.pi:
|
| 25 |
+
diff -= 2 * np.pi
|
| 26 |
+
sectors.append(diff)
|
| 27 |
+
|
| 28 |
+
return sectors
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def check_kawasaki_at_vertex(vertex_id: int, graph: CreaseGraph) -> tuple[bool, float]:
    """Check the Kawasaki-Justin condition at a single vertex.

    At a flat-foldable interior vertex with 2n creases, the alternating sum
    of consecutive sector angles is zero (equivalently, even- and odd-indexed
    sectors each sum to π).

    Returns:
        ``(satisfied, |alternating_sum|)``. Vertices of degree < 4 are not
        active fold vertices yet and pass trivially with error 0.0; vertices
        of odd degree can never fold flat and report ``(False, inf)``.
    """
    degree = len(graph.get_cyclic_edges(vertex_id))

    if degree % 2:  # odd degree: flat folding impossible
        return (False, float('inf'))
    if degree < 4:  # not yet an interior fold vertex
        return (True, 0.0)

    sectors = _compute_sector_angles(vertex_id, graph)
    alternating = 0.0
    sign = 1.0
    for sector in sectors:
        alternating += sign * sector
        sign = -sign
    residual = abs(alternating)
    return (residual < 1e-9, residual)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def check_maekawa_at_vertex(vertex_id: int, graph: CreaseGraph) -> bool:
    """Check the Maekawa-Justin condition at a single vertex.

    For a flat-foldable interior vertex the mountain and valley crease counts
    differ by exactly two; boundary ('B') edges are ignored. Vertices with
    fewer than four fold edges are not yet active and pass trivially.
    """
    assignments = [graph.edges[eid][2] for eid in graph.vertex_edges[vertex_id]]
    mountains = assignments.count('M')
    valleys = assignments.count('V')

    if mountains + valleys < 4:
        return True
    return abs(mountains - valleys) == 2
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def check_blb_at_vertex(vertex_id: int, graph: CreaseGraph) -> list[tuple[int, int]]:
    """Check the Big-Little-Big lemma at a single vertex.

    Whenever a sector angle is a strict local minimum (smaller than both
    neighbors), the two fold edges bounding that sector must carry opposite
    mountain/valley assignments.

    Returns:
        Pairs ``(edge_a_id, edge_b_id)`` that violate the lemma; an empty
        list means no violations (vertices of degree < 4 never violate).
    """
    ring = graph.get_cyclic_edges(vertex_id)
    degree = len(ring)
    if degree < 4:
        return []

    sectors = _compute_sector_angles(vertex_id, graph)

    bad_pairs: list[tuple[int, int]] = []
    for idx in range(degree):
        here = sectors[idx]
        # Only strict local minima trigger the lemma.
        if not (here < sectors[(idx - 1) % degree] and here < sectors[(idx + 1) % degree]):
            continue

        left_edge = ring[idx]
        right_edge = ring[(idx + 1) % degree]
        left_kind = graph.edges[left_edge][2]
        right_kind = graph.edges[right_edge][2]

        # Boundary edges are exempt; equal M/M or V/V assignments violate BLB.
        if left_kind in ('M', 'V') and right_kind in ('M', 'V') and left_kind == right_kind:
            bad_pairs.append((left_edge, right_edge))

    return bad_pairs
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def _angle_diff(a1: float, a2: float) -> float:
|
| 118 |
+
"""Minimum angle difference between two directed lines (considering 180Β° symmetry)."""
|
| 119 |
+
diff = abs(a1 - a2) % np.pi
|
| 120 |
+
return min(diff, np.pi - diff)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def geometric_crease_coverage(
    state: PaperState,
    target_edges: list[dict],
    tol_pos: float = 0.05,
    tol_angle_deg: float = 5.0,
) -> tuple[float, float]:
    """Score how closely the current crease pattern matches the target.

    A target crease counts as matched when some current crease has a midpoint
    within *tol_pos* of it and a direction within *tol_angle_deg* (mod 180°).

    Args:
        state: Paper state providing ``crease_edges()``.
        target_edges: Each entry is ``{'v1': (x1, y1), 'v2': (x2, y2),
            'assignment': 'M'|'V'}``.
        tol_pos: Midpoint distance tolerance.
        tol_angle_deg: Direction tolerance in degrees.

    Returns:
        ``(coverage, economy)``: coverage is the matched fraction of target
        creases in [0, 1]; economy penalizes excess creases, 1.0 = no excess.
    """
    existing = state.crease_edges()
    angle_tol = np.deg2rad(tol_angle_deg)

    def _summary(edge: dict) -> tuple[float, float, float]:
        # Midpoint and direction of one crease segment.
        (x1, y1), (x2, y2) = edge['v1'], edge['v2']
        return ((x1 + x2) / 2.0, (y1 + y2) / 2.0, np.arctan2(y2 - y1, x2 - x1))

    hits = 0
    for tgt in target_edges:
        tmx, tmy, t_dir = _summary(tgt)
        for cur in existing:
            cmx, cmy, c_dir = _summary(cur)
            close_enough = np.hypot(cmx - tmx, cmy - tmy) <= tol_pos
            aligned = _angle_diff(c_dir, t_dir) <= angle_tol
            if close_enough and aligned:
                hits += 1
                break

    denom = max(len(target_edges), 1)
    coverage = hits / denom
    surplus = max(0, len(existing) - len(target_edges))
    economy = max(0.0, 1.0 - surplus / denom)
    return (coverage, economy)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def check_all_vertices(graph: CreaseGraph) -> dict:
    """Run Kawasaki, Maekawa and BLB checks on every interior vertex.

    Returns:
        dict with keys:
            'kawasaki': fraction of interior vertices passing Kawasaki [0, 1]
            'maekawa':  fraction passing Maekawa [0, 1]
            'blb':      fraction with no BLB violations [0, 1]
            'n_interior': number of interior vertices checked
            'per_vertex': per-vertex detail dicts
    """
    interior = graph.interior_vertices()
    if not interior:
        # No interior vertices: every condition is vacuously satisfied.
        return {
            'kawasaki': 1.0,
            'maekawa': 1.0,
            'blb': 1.0,
            'n_interior': 0,
            'per_vertex': [],
        }

    details = []
    passes = {'kawasaki': 0, 'maekawa': 0, 'blb': 0}

    for vertex_id in interior:
        kawasaki_ok, kawasaki_err = check_kawasaki_at_vertex(vertex_id, graph)
        maekawa_ok = check_maekawa_at_vertex(vertex_id, graph)
        blb_bad = check_blb_at_vertex(vertex_id, graph)

        passes['kawasaki'] += int(kawasaki_ok)
        passes['maekawa'] += int(maekawa_ok)
        passes['blb'] += int(not blb_bad)

        details.append({
            'vertex_id': vertex_id,
            'kawasaki_ok': kawasaki_ok,
            'kawasaki_error': kawasaki_err,
            'maekawa_ok': maekawa_ok,
            'blb_violations': blb_bad,
        })

    total = len(interior)
    return {
        'kawasaki': passes['kawasaki'] / total,
        'maekawa': passes['maekawa'] / total,
        'blb': passes['blb'] / total,
        'n_interior': total,
        'per_vertex': details,
    }
|
openenv.yaml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
spec_version: 1
|
| 2 |
+
name: optigami
|
| 3 |
+
type: space
|
| 4 |
+
runtime: fastapi
|
| 5 |
+
app: openenv_server.app:app
|
| 6 |
+
port: 8000
|
openenv_runtime/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""OpenEnv integration runtime for Optigami."""
|
| 2 |
+
|
| 3 |
+
from .environment import OpenEnvOrigamiEnvironment
|
| 4 |
+
from .models import OrigamiAction, OrigamiObservation, OrigamiState
|
| 5 |
+
|
| 6 |
+
__all__ = [
|
| 7 |
+
"OpenEnvOrigamiEnvironment",
|
| 8 |
+
"OrigamiAction",
|
| 9 |
+
"OrigamiObservation",
|
| 10 |
+
"OrigamiState",
|
| 11 |
+
]
|
openenv_runtime/environment.py
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Any, Optional
|
| 4 |
+
|
| 5 |
+
from openenv.core.env_server.interfaces import Environment
|
| 6 |
+
|
| 7 |
+
from env.environment import OrigamiEnvironment
|
| 8 |
+
|
| 9 |
+
from .models import OrigamiAction, OrigamiObservation, OrigamiState
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class OpenEnvOrigamiEnvironment(Environment[OrigamiAction, OrigamiObservation, OrigamiState]):
    """OpenEnv adapter over the existing OrigamiEnvironment implementation.

    Wraps the project-internal ``env.environment.OrigamiEnvironment`` (which
    speaks dicts/tuples) behind the OpenEnv ``Environment`` interface (typed
    Action/Observation/State models). The inner environment is created lazily
    on the first ``reset``/``step``.
    """

    # Advertises to the OpenEnv server that each session may hold its own
    # instance of this class concurrently.
    SUPPORTS_CONCURRENT_SESSIONS = True

    def __init__(
        self,
        default_mode: str = "step",
        max_steps: int = 8,
        targets_dir: Optional[str] = None,
    ):
        """Store construction defaults; no inner environment is built yet.

        Args:
            default_mode: Mode used when a reset does not specify one.
            max_steps: Step budget passed to every inner environment.
            targets_dir: Optional override for the targets directory.
        """
        super().__init__()
        self.default_mode = default_mode
        self.max_steps = max_steps
        self.targets_dir = targets_dir
        self._env: Optional[OrigamiEnvironment] = None
        self._episode_id: Optional[str] = None

    def _new_env(self, mode: Optional[str] = None) -> OrigamiEnvironment:
        """Build a fresh inner environment with this adapter's settings."""
        return OrigamiEnvironment(
            mode=mode or self.default_mode,
            max_steps=self.max_steps,
            targets_dir=self.targets_dir,
        )

    def reset(
        self,
        seed: Optional[int] = None,
        episode_id: Optional[str] = None,
        **kwargs: Any,
    ) -> OrigamiObservation:
        """Start a new episode; accepts ``mode`` and ``target_name`` kwargs."""
        del seed  # deterministic seed plumbing can be added later

        mode = kwargs.get("mode", self.default_mode)
        target_name = kwargs.get("target_name")

        # Replace any existing inner environment wholesale.
        self._env = self._new_env(mode=mode)
        self._episode_id = episode_id
        obs_dict = self._env.reset(target_name=target_name)

        return OrigamiObservation(
            done=False,
            reward=None,
            metadata={"available_targets": self._env.available_targets()},
            prompt=obs_dict.get("prompt", ""),
            target_name=obs_dict.get("target_name"),
            step=obs_dict.get("step", 0),
            paper_state=self._paper_state_snapshot(),
            # NOTE(review): reaches into the inner env's private _info() —
            # consider exposing a public accessor.
            info=self._env._info(),
            reward_components={},
        )

    def step(
        self,
        action: OrigamiAction,
        timeout_s: Optional[float] = None,
        **kwargs: Any,
    ) -> OrigamiObservation:
        """Execute one action; errors are converted into penalty observations."""
        del timeout_s, kwargs

        # Lazily bootstrap an episode if the client stepped before resetting.
        if self._env is None:
            self.reset(target_name=action.target_name)

        assert self._env is not None

        # A target override mid-episode forces a reset onto that target.
        if action.target_name and action.target_name != self._env.target_name:
            self.reset(target_name=action.target_name, mode=self._env.mode)

        try:
            if action.mode == "sequence":
                if not action.completion:
                    return self._error_observation("sequence mode requires completion")

                # Sequence mode runs the whole completion in a dedicated
                # code_as_policy environment, then adopts it as current.
                seq_env = self._new_env(mode="code_as_policy")
                seq_env.reset(target_name=self._env.target_name)
                obs_dict, reward_dict, done, info = seq_env.step(action.completion)
                self._env = seq_env
            else:
                # Single mode: prefer the structured fold payload, fall back
                # to a raw completion string.
                if action.fold is not None:
                    fold_payload = {
                        "from": list(action.fold.from_point),
                        "to": list(action.fold.to_point),
                        "assignment": action.fold.assignment,
                        "instruction": action.fold.instruction,
                    }
                    env_action: Any = fold_payload
                elif action.completion:
                    env_action = action.completion
                else:
                    return self._error_observation("single mode requires fold or completion")

                obs_dict, reward_dict, done, info = self._env.step(env_action)

            total = reward_dict.get("total") if isinstance(reward_dict, dict) else None
            return OrigamiObservation(
                done=bool(done),
                reward=float(total) if isinstance(total, (int, float)) else None,
                metadata={"target_name": self._env.target_name},
                prompt=obs_dict.get("prompt", ""),
                target_name=obs_dict.get("target_name", self._env.target_name),
                step=obs_dict.get("step", self._env.step_count),
                paper_state=self._paper_state_snapshot(),
                info=info or {},
                reward_components=reward_dict or {},
            )
        except Exception as exc:  # pragma: no cover - defensive path
            return self._error_observation(str(exc))

    @property
    def state(self) -> OrigamiState:
        """Current adapter state; usable even before the first reset."""
        if self._env is None:
            # Build a throwaway env just to report defaults / target list.
            tmp_env = self._new_env(mode=self.default_mode)
            return OrigamiState(
                episode_id=self._episode_id,
                step_count=0,
                mode=tmp_env.mode,
                target_name=None,
                paper={},
                last_reward={},
                available_targets=tmp_env.available_targets(),
            )

        env_state = self._env.state()
        return OrigamiState(
            episode_id=self._episode_id,
            step_count=env_state.get("step", self._env.step_count),
            mode=env_state.get("mode", self._env.mode),
            target_name=env_state.get("target", self._env.target_name),
            paper=env_state.get("paper", {}),
            last_reward=self._env.last_reward or {},
            available_targets=self._env.available_targets(),
        )

    def close(self) -> None:
        """Close and drop the inner environment, if any."""
        if self._env is not None:
            self._env.close()
        self._env = None

    def _paper_state_snapshot(self) -> dict[str, Any]:
        """Serialize the current paper graph into JSON-friendly primitives."""
        if self._env is None or self._env.paper is None:
            return {"vertices": {}, "edges": [], "anchor_points": []}

        graph = self._env.paper.graph
        return {
            # Dict keys become strings so the payload survives JSON round-trips.
            "vertices": {str(k): [float(v[0]), float(v[1])] for k, v in graph.vertices.items()},
            "edges": [
                {
                    "id": int(eid),
                    "v1": [float(graph.vertices[v1][0]), float(graph.vertices[v1][1])],
                    "v2": [float(graph.vertices[v2][0]), float(graph.vertices[v2][1])],
                    "assignment": assignment,
                }
                for eid, (v1, v2, assignment) in graph.edges.items()
            ],
            "anchor_points": [
                [float(x), float(y)] for (x, y) in self._env.paper.anchor_points()
            ],
        }

    def _error_observation(self, message: str) -> OrigamiObservation:
        """Build a non-terminal observation carrying a fixed -0.1 penalty."""
        return OrigamiObservation(
            done=False,
            reward=-0.1,
            metadata={"error": True},
            prompt="",
            target_name=self._env.target_name if self._env else None,
            step=self._env.step_count if self._env else 0,
            paper_state=self._paper_state_snapshot(),
            info=self._env._info() if self._env else {},
            reward_components={"format": 0.0, "total": -0.1, "error": message},
            error=message,
        )
|
openenv_runtime/models.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Any, Literal, Optional
|
| 4 |
+
|
| 5 |
+
from pydantic import BaseModel, Field, field_validator
|
| 6 |
+
|
| 7 |
+
from openenv.core.env_server.types import Action, Observation, State
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class OrigamiFold(BaseModel):
    """Single fold action payload for step-level execution."""

    from_point: list[float] = Field(..., description="Fold line start [x, y]")
    to_point: list[float] = Field(..., description="Fold line end [x, y]")
    assignment: Literal["M", "V"] = Field(..., description="Mountain or valley")
    instruction: str = Field(default="", description="Optional natural language instruction")

    @field_validator("from_point", "to_point")
    @classmethod
    def _validate_point(cls, point: list[float]) -> list[float]:
        # Coerce both endpoints to exactly two floats; rejects malformed
        # points at parse time rather than deep inside the fold engine.
        if len(point) != 2:
            raise ValueError("Point must contain exactly 2 coordinates")
        return [float(point[0]), float(point[1])]
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class OrigamiAction(Action):
    """
    OpenEnv action for Optigami.

    Modes:
    - single: execute one fold (pass `fold` or JSON `completion` for a single-fold object)
    - sequence: execute a full <folds>[...]</folds> completion in one step
    """

    mode: Literal["single", "sequence"] = Field(default="single")
    # `fold` (structured) and `completion` (raw text) are alternatives in
    # single mode; sequence mode requires `completion`.
    fold: Optional[OrigamiFold] = Field(default=None)
    completion: Optional[str] = Field(default=None)
    target_name: Optional[str] = Field(
        default=None,
        description="Optional target override; reset to this target before stepping",
    )
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class OrigamiObservation(Observation):
    """OpenEnv observation payload returned by Optigami."""

    prompt: str = Field(default="")
    target_name: Optional[str] = Field(default=None)
    step: int = Field(default=0)
    # JSON-serializable snapshot of the paper: vertices, edges, anchor points.
    paper_state: dict[str, Any] = Field(default_factory=dict)
    info: dict[str, Any] = Field(default_factory=dict)
    # Per-component reward breakdown; may also carry an 'error' string entry.
    reward_components: dict[str, float | int | str] = Field(default_factory=dict)
    error: Optional[str] = Field(default=None)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class OrigamiState(State):
    """OpenEnv state payload for Optigami."""

    mode: str = Field(default="step")
    target_name: Optional[str] = Field(default=None)
    # Inner environment's paper snapshot, as returned by its state() dict.
    paper: dict[str, Any] = Field(default_factory=dict)
    last_reward: dict[str, Any] = Field(default_factory=dict)
    available_targets: list[str] = Field(default_factory=list)
|
openenv_server/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""OpenEnv FastAPI app package."""
|
openenv_server/app.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
|
| 5 |
+
from fastapi.responses import HTMLResponse
|
| 6 |
+
from fastapi.staticfiles import StaticFiles
|
| 7 |
+
from openenv.core.env_server.http_server import create_app
|
| 8 |
+
|
| 9 |
+
from openenv_runtime.environment import OpenEnvOrigamiEnvironment
|
| 10 |
+
from openenv_runtime.models import OrigamiAction, OrigamiObservation
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
app = create_app(
|
| 14 |
+
env=lambda: OpenEnvOrigamiEnvironment(),
|
| 15 |
+
action_cls=OrigamiAction,
|
| 16 |
+
observation_cls=OrigamiObservation,
|
| 17 |
+
env_name="optigami",
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# ---------------------------------------------------------------------------
|
| 22 |
+
# Demo fold sequences β new format: type, line {start, end}, angle
|
| 23 |
+
# ---------------------------------------------------------------------------
|
| 24 |
+
|
| 25 |
+
DEMO_SEQUENCES: dict[str, list[dict]] = {
|
| 26 |
+
"half_fold": [
|
| 27 |
+
{"type": "valley", "line": {"start": [0.0, 0.5], "end": [1.0, 0.5]}, "angle": 180.0},
|
| 28 |
+
],
|
| 29 |
+
"quarter_fold": [
|
| 30 |
+
{"type": "valley", "line": {"start": [0.0, 0.5], "end": [1.0, 0.5]}, "angle": 180.0},
|
| 31 |
+
{"type": "valley", "line": {"start": [0.0, 0.5], "end": [1.0, 0.5]}, "angle": 180.0},
|
| 32 |
+
],
|
| 33 |
+
"letter_fold": [
|
| 34 |
+
{"type": "valley", "line": {"start": [0.0, 0.333], "end": [1.0, 0.333]}, "angle": 180.0},
|
| 35 |
+
{"type": "mountain", "line": {"start": [0.0, 0.667], "end": [1.0, 0.667]}, "angle": 180.0},
|
| 36 |
+
],
|
| 37 |
+
"map_fold": [
|
| 38 |
+
{"type": "valley", "line": {"start": [0.0, 0.5], "end": [1.0, 0.5]}, "angle": 180.0},
|
| 39 |
+
{"type": "mountain", "line": {"start": [0.5, 0.0], "end": [0.5, 1.0]}, "angle": 180.0},
|
| 40 |
+
],
|
| 41 |
+
"solar_panel": [
|
| 42 |
+
{"type": "valley", "line": {"start": [0.0, 0.25], "end": [1.0, 0.25]}, "angle": 180.0},
|
| 43 |
+
{"type": "mountain", "line": {"start": [0.0, 0.5], "end": [1.0, 0.5]}, "angle": 180.0},
|
| 44 |
+
{"type": "valley", "line": {"start": [0.0, 0.75], "end": [1.0, 0.75]}, "angle": 180.0},
|
| 45 |
+
],
|
| 46 |
+
"shelter_wall": [
|
| 47 |
+
{"type": "valley", "line": {"start": [0.0, 0.333], "end": [1.0, 0.333]}, "angle": 180.0},
|
| 48 |
+
{"type": "valley", "line": {"start": [0.0, 0.667], "end": [1.0, 0.667]}, "angle": 180.0},
|
| 49 |
+
],
|
| 50 |
+
"stent": [
|
| 51 |
+
{"type": "valley", "line": {"start": [0.0, 0.25], "end": [1.0, 0.25]}, "angle": 90.0},
|
| 52 |
+
{"type": "mountain", "line": {"start": [0.0, 0.5], "end": [1.0, 0.5]}, "angle": 90.0},
|
| 53 |
+
{"type": "valley", "line": {"start": [0.0, 0.75], "end": [1.0, 0.75]}, "angle": 90.0},
|
| 54 |
+
{"type": "stop", "line": {"start": [0.0, 0.0], "end": [1.0, 1.0]}, "angle": 0.0},
|
| 55 |
+
],
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# ---------------------------------------------------------------------------
|
| 60 |
+
# API routes β must be registered BEFORE the StaticFiles catch-all mount
|
| 61 |
+
# ---------------------------------------------------------------------------
|
| 62 |
+
|
| 63 |
+
@app.get("/targets", include_in_schema=True)
def get_targets() -> dict:
    """Return available task names and metadata for the frontend."""
    # Imported lazily so the OpenEnv app can start even when the optional
    # server package is absent; the route fails only when actually hit.
    from server.tasks import get_task_by_name, available_task_names

    result: dict[str, dict] = {}
    for name in available_task_names():
        t = get_task_by_name(name)
        result[name] = {
            "name": name,
            # 'level' intentionally mirrors 'difficulty' for the frontend.
            "level": t.get("difficulty", 1),
            "description": t.get("description", ""),
            "n_creases": t.get("max_folds", 3),
            "difficulty": t.get("difficulty", 1),
            "material": t.get("material", "paper"),
        }
    return result
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
@app.get("/episode/demo", include_in_schema=True)
def demo_episode(target: str = "half_fold") -> dict:
    """Return a pre-solved demo episode for the given task.

    Replays the scripted fold sequence from DEMO_SEQUENCES against a fresh
    environment and returns every intermediate paper state for the renderer.
    """
    # NOTE(review): this imports server.* modules, while the OpenEnv adapter
    # uses env.environment — confirm both packages ship in the image.
    from server.origami_environment import OrigamiEnvironment
    from server.models import OrigamiAction as NewOrigamiAction
    from server.tasks import get_task_by_name

    # Fall back to half_fold if target not found
    folds = DEMO_SEQUENCES.get(target, DEMO_SEQUENCES["half_fold"])

    env = OrigamiEnvironment()
    obs = env.reset(task_name=target)

    steps: list[dict] = []

    for i, fold_dict in enumerate(folds):
        # A 'stop' entry marks the end of the scripted sequence.
        if fold_dict.get("type") == "stop":
            break

        action = NewOrigamiAction(
            fold_type=fold_dict["type"],
            fold_line=fold_dict["line"],
            fold_angle=float(fold_dict.get("angle", 180.0)),
        )

        obs = env.step(action)

        steps.append({
            "step": i + 1,
            "fold": fold_dict,
            "paper_state": obs.paper_state,
            "metrics": obs.metrics,
            "done": obs.done,
        })

        if obs.done:
            break

    task_def = get_task_by_name(target) if target else {}

    return {
        "task_name": target,
        "task": task_def,
        "steps": steps,
        # Guard: obs.metrics only exists after at least one step was taken.
        "final_metrics": obs.metrics if steps else {},
    }
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
# ---------------------------------------------------------------------------
|
| 131 |
+
# Static file serving β must come LAST so API routes take priority
|
| 132 |
+
# ---------------------------------------------------------------------------
|
| 133 |
+
|
| 134 |
+
_BUILD_DIR = Path(__file__).resolve().parent.parent / "build"
|
| 135 |
+
|
| 136 |
+
if _BUILD_DIR.exists():
|
| 137 |
+
app.mount("/", StaticFiles(directory=str(_BUILD_DIR), html=True), name="renderer")
|
| 138 |
+
else:
|
| 139 |
+
@app.get("/", include_in_schema=False)
|
| 140 |
+
def missing_renderer_build() -> HTMLResponse:
|
| 141 |
+
return HTMLResponse(
|
| 142 |
+
"""
|
| 143 |
+
<html><body style="font-family: sans-serif; margin: 24px;">
|
| 144 |
+
<h3>Renderer build not found</h3>
|
| 145 |
+
<p>No <code>build/</code> directory is present in the container.</p>
|
| 146 |
+
<p>OpenEnv API docs are available at <a href="/docs">/docs</a>.</p>
|
| 147 |
+
</body></html>
|
| 148 |
+
""",
|
| 149 |
+
status_code=200,
|
| 150 |
+
)
|
package-lock.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
plans/implementation_plan.md
ADDED
|
@@ -0,0 +1,485 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Optigami β Implementation Plan
|
| 2 |
+
|
| 3 |
+
> Derived from handoff doc critique, origami math/physics research, and plan review.
|
| 4 |
+
> Last updated: 2026-03-07
|
| 5 |
+
|
| 6 |
+
---
|
| 7 |
+
|
| 8 |
+
## Resolved Architectural Decisions
|
| 9 |
+
|
| 10 |
+
### 1. Code-as-policy for training, step-level for demo
|
| 11 |
+
|
| 12 |
+
GRPO samples N completions for a fixed prompt, evaluates each independently, computes group advantages. That maps cleanly to **code-as-policy**: the model outputs a complete fold sequence as a JSON list, the environment executes it sequentially, terminal reward is computed once.
|
| 13 |
+
|
| 14 |
+
Step-level breaks GRPO's assumption: at step k, the prompt is conditioned on prior steps which differ across rollouts, so you're no longer comparing N completions to the same situation.
|
| 15 |
+
|
| 16 |
+
**Resolution:** Training is code-as-policy (full sequence β single reward). Demo is step-by-step (one fold at a time with live feedback). Same environment, different prompt wrapper. Same model at inference β you just prompt it one fold at a time for the demo.
|
| 17 |
+
|
| 18 |
+
### 2. 2D crease pattern is Phase 1, engineering metrics are Phase 2
|
| 19 |
+
|
| 20 |
+
**Phase 1 (hackathon MVP):** Build the crease pattern graph, check local foldability, use geometric coverage as progress proxy. Self-contained, can show reward improvement.
|
| 21 |
+
|
| 22 |
+
**Phase 2 (if time permits):** Apply fold angles to compute the 3D folded state, compute deployment ratio and bounding box. These become the primary reward, with crease coverage as scaffolding. This is where the "model discovers Miura-ori" story lives.
|
| 23 |
+
|
| 24 |
+
If the deadline forces a cut, Phase 1 ships and Phase 2 is explicitly called out as the next step.
|
| 25 |
+
|
| 26 |
+
### 3. Scope to local flat-foldability (NP-hardness acknowledged)
|
| 27 |
+
|
| 28 |
+
Global flat-foldability (layer ordering) is NP-complete (Bern-Hayes 1996). We target **local flat-foldability** at each vertex, which is polynomial. This is a feature, not a limitation β the pitch: "our rewards check the conditions every origami designer verifies. Global layer ordering is provably NP-complete."
|
| 29 |
+
|
| 30 |
+
### 4. Symmetry masking is a noted risk
|
| 31 |
+
|
| 32 |
+
For Level 1-2 targets the anchor set is small (β€8 points), manageable. For Level 3+, intersection vertices accumulate to 15-20+ points, giving O(300+) candidate fold lines. The unit square has dihedral-4 symmetry (4 rotations + 4 reflections). For Level 3+, if training shows no convergence after 500 steps, add explicit symmetry-based action pruning.
|
| 33 |
+
|
| 34 |
+
---
|
| 35 |
+
|
| 36 |
+
## File Structure
|
| 37 |
+
|
| 38 |
+
```
|
| 39 |
+
optigami/
|
| 40 |
+
env/
|
| 41 |
+
__init__.py
|
| 42 |
+
graph.py # CreaseGraph: vertices, edges, cyclic ordering
|
| 43 |
+
paper_state.py # PaperState using CreaseGraph, add_crease
|
| 44 |
+
verifier.py # Kawasaki, Maekawa, BLB, coverage, deployment ratio
|
| 45 |
+
rewards.py # compute_reward (Phase 1 + Phase 2 extension)
|
| 46 |
+
environment.py # OpenEnv wrapper, code-as-policy and step modes
|
| 47 |
+
prompts.py # LLM observation formatting
|
| 48 |
+
fold_engine.py # Phase 2: apply fold angles, compute 3D bounding box
|
| 49 |
+
targets/
|
| 50 |
+
validator.py # crimp-check all .fold files before training
|
| 51 |
+
half_horizontal.fold
|
| 52 |
+
half_vertical.fold
|
| 53 |
+
diagonal.fold
|
| 54 |
+
cross_fold.fold
|
| 55 |
+
x_fold.fold
|
| 56 |
+
pinwheel_base.fold
|
| 57 |
+
preliminary_base.fold
|
| 58 |
+
fish_base.fold
|
| 59 |
+
train.py
|
| 60 |
+
requirements.txt
|
| 61 |
+
src/ # React demo visualizer (existing)
|
| 62 |
+
plans/
|
| 63 |
+
implementation_plan.md
|
| 64 |
+
```
|
| 65 |
+
|
| 66 |
+
---
|
| 67 |
+
|
| 68 |
+
## Phase 1: CreaseGraph (`env/graph.py`)
|
| 69 |
+
|
| 70 |
+
Everything builds on this. Get it right first.
|
| 71 |
+
|
| 72 |
+
**Data:**
|
| 73 |
+
- `vertices`: `dict[vertex_id β (x, y)]`
|
| 74 |
+
- `edges`: `dict[edge_id β (v1, v2, assignment)]` where assignment β `{M, V, B}`
|
| 75 |
+
- `vertex_edges`: `dict[vertex_id β [edge_ids]]`
|
| 76 |
+
|
| 77 |
+
**Key operations:**
|
| 78 |
+
- `add_vertex(x, y, tol=1e-9)` β deduplicated by proximity
|
| 79 |
+
- `add_edge(v1, v2, assignment)` β no duplicates
|
| 80 |
+
- `get_cyclic_edges(vertex_id)` β incident edge IDs sorted by angle of the other endpoint around the vertex (the cyclic order Kawasaki requires)
|
| 81 |
+
- `interior_vertices()` β vertices not on the unit square boundary
|
| 82 |
+
- `split_edge(edge_id, new_vertex_id)` β splits an edge at a vertex, used when a new crease intersects an existing one
|
| 83 |
+
|
| 84 |
+
**`add_crease(p1, p2, assignment)` in `PaperState`:**
|
| 85 |
+
1. Validate both endpoints are in the anchor set (within tolerance)
|
| 86 |
+
2. Find all intersections with existing edges
|
| 87 |
+
3. Add intersection vertices and split existing edges at them
|
| 88 |
+
4. Add the new crease edge(s) (possibly split by intersections)
|
| 89 |
+
5. Return `{valid, anchored, new_vertices, errors}`
|
| 90 |
+
|
| 91 |
+
**Anchor point set** (grows as creases are added):
|
| 92 |
+
- Boundary corners: `(0,0), (1,0), (1,1), (0,1)`
|
| 93 |
+
- Boundary midpoints of any existing boundary edge
|
| 94 |
+
- All crease-crease intersection vertices
|
| 95 |
+
- Midpoints of existing crease edges
|
| 96 |
+
|
| 97 |
+
---
|
| 98 |
+
|
| 99 |
+
## Phase 2: Verifiers (`env/verifier.py`)
|
| 100 |
+
|
| 101 |
+
### Even-degree fast-fail
|
| 102 |
+
|
| 103 |
+
```python
|
| 104 |
+
def has_even_degree(vertex_id, graph) -> bool:
|
| 105 |
+
return len(graph.get_cyclic_edges(vertex_id)) % 2 == 0
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
Runs before Kawasaki. Odd-degree interior vertices are impossible β short-circuit immediately.
|
| 109 |
+
|
| 110 |
+
### Kawasaki-Justin
|
| 111 |
+
|
| 112 |
+
Sector angles must be computed in **cyclic angular order** around each vertex β not by magnitude, not arbitrarily. The handoff's sorted-angle approach was wrong; cyclic order is recovered by sorting incident edge directions by `arctan2`.
|
| 113 |
+
|
| 114 |
+
```python
|
| 115 |
+
def check_kawasaki_at_vertex(vertex_id, graph) -> tuple[bool, float]:
|
| 116 |
+
cyclic_edges = graph.get_cyclic_edges(vertex_id) # sorted by angle
|
| 117 |
+
n = len(cyclic_edges)
|
| 118 |
+
if n % 2 != 0:
|
| 119 |
+
return False, float('inf')
|
| 120 |
+
if n < 4:
|
| 121 |
+
return True, 0.0 # boundary vertex, not an interior fold vertex
|
| 122 |
+
|
| 123 |
+
v = graph.vertices[vertex_id]
|
| 124 |
+
angles = []
|
| 125 |
+
for eid in cyclic_edges:
|
| 126 |
+
v1, v2, _ = graph.edges[eid]
|
| 127 |
+
other = v2 if v1 == vertex_id else v1
|
| 128 |
+
other_pos = graph.vertices[other]
|
| 129 |
+
angles.append(np.arctan2(other_pos[1] - v[1], other_pos[0] - v[0]))
|
| 130 |
+
# angles is already in cyclic order (cyclic_edges sorted by angle)
|
| 131 |
+
|
| 132 |
+
sectors = []
|
| 133 |
+
for i in range(n):
|
| 134 |
+
diff = angles[(i+1) % n] - angles[i]
|
| 135 |
+
if diff < 0:
|
| 136 |
+
diff += 2 * np.pi
|
| 137 |
+
sectors.append(diff)
|
| 138 |
+
|
| 139 |
+
alt_sum = sum(s * ((-1)**i) for i, s in enumerate(sectors))
|
| 140 |
+
return abs(alt_sum) < 1e-9, abs(alt_sum)
|
| 141 |
+
```
|
| 142 |
+
|
| 143 |
+
### Maekawa-Justin
|
| 144 |
+
|
| 145 |
+
Boundary edges (`B`) must not be counted β only fold edges (`M`, `V`). The handoff counted boundary edges, which breaks Maekawa for any crease touching the paper edge.
|
| 146 |
+
|
| 147 |
+
```python
|
| 148 |
+
def check_maekawa_at_vertex(vertex_id, graph) -> bool:
|
| 149 |
+
fold_edges = [eid for eid in graph.vertex_edges[vertex_id]
|
| 150 |
+
if graph.edges[eid][2] in ('M', 'V')]
|
| 151 |
+
if len(fold_edges) < 4:
|
| 152 |
+
return True # not an interior fold vertex yet
|
| 153 |
+
M = sum(1 for eid in fold_edges if graph.edges[eid][2] == 'M')
|
| 154 |
+
V = len(fold_edges) - M
|
| 155 |
+
return abs(M - V) == 2
|
| 156 |
+
```
|
| 157 |
+
|
| 158 |
+
### Big-Little-Big (BLB)
|
| 159 |
+
|
| 160 |
+
At any interior vertex, if a sector angle is a strict local minimum, the two crease lines bounding that sector must have **opposite MV parity**. This is the key pruning rule between Maekawa and layer-ordering β a pattern can satisfy Maekawa while violating BLB, meaning no valid layer ordering exists.
|
| 161 |
+
|
| 162 |
+
```python
|
| 163 |
+
def check_blb_at_vertex(vertex_id, graph) -> list[tuple]:
|
| 164 |
+
"""Returns list of (edge_a, edge_b) pairs where BLB is violated."""
|
| 165 |
+
cyclic_edges = graph.get_cyclic_edges(vertex_id)
|
| 166 |
+
n = len(cyclic_edges)
|
| 167 |
+
if n < 4:
|
| 168 |
+
return []
|
| 169 |
+
sectors = _compute_sectors(vertex_id, cyclic_edges, graph)
|
| 170 |
+
violations = []
|
| 171 |
+
for i in range(n):
|
| 172 |
+
prev_s = sectors[(i-1) % n]
|
| 173 |
+
next_s = sectors[(i+1) % n]
|
| 174 |
+
if sectors[i] < prev_s and sectors[i] < next_s: # strict local min
|
| 175 |
+
left_eid = cyclic_edges[i]
|
| 176 |
+
right_eid = cyclic_edges[(i+1) % n]
|
| 177 |
+
a_left = graph.edges[left_eid][2]
|
| 178 |
+
a_right = graph.edges[right_eid][2]
|
| 179 |
+
if a_left in ('M', 'V') and a_right in ('M', 'V') and a_left == a_right:
|
| 180 |
+
violations.append((left_eid, right_eid))
|
| 181 |
+
return violations
|
| 182 |
+
```
|
| 183 |
+
|
| 184 |
+
### Geometric Coverage (with excess penalty)
|
| 185 |
+
|
| 186 |
+
One-sided coverage alone rewards placing target creases but doesn't penalize surplus creases. Both are returned separately so the reward function can weight them independently.
|
| 187 |
+
|
| 188 |
+
```python
|
| 189 |
+
def geometric_coverage(state, target_edges, tol_pos=0.05, tol_angle=5.0) -> tuple[float, float]:
|
| 190 |
+
"""
|
| 191 |
+
Returns (coverage, economy).
|
| 192 |
+
coverage: fraction of target creases matched by current creases [0, 1]
|
| 193 |
+
economy: penalty for excess creases [0, 1], 1.0 = no excess
|
| 194 |
+
"""
|
| 195 |
+
matched = 0
|
| 196 |
+
for t_edge in target_edges:
|
| 197 |
+
for c_edge in state.crease_edges():
|
| 198 |
+
if _edges_match(t_edge, c_edge, tol_pos, tol_angle):
|
| 199 |
+
matched += 1
|
| 200 |
+
break
|
| 201 |
+
n_target = max(len(target_edges), 1)
|
| 202 |
+
n_current = len(state.crease_edges())
|
| 203 |
+
coverage = matched / n_target
|
| 204 |
+
economy = max(0.0, 1.0 - max(0, n_current - n_target) / n_target)
|
| 205 |
+
return coverage, economy
|
| 206 |
+
```
|
| 207 |
+
|
| 208 |
+
---
|
| 209 |
+
|
| 210 |
+
## Phase 3: Reward Function (`env/rewards.py`)
|
| 211 |
+
|
| 212 |
+
### Phase 1 reward
|
| 213 |
+
|
| 214 |
+
Single consistent definition. `progress` carries 45% β it's the only signal with real geometric content at every step. Validity signals split 20% total. Economy penalizes excess creases.
|
| 215 |
+
|
| 216 |
+
```python
|
| 217 |
+
def compute_reward_phase1(state, action_result, target) -> dict:
|
| 218 |
+
r = {}
|
| 219 |
+
|
| 220 |
+
r['format'] = 1.0 if action_result['valid'] else 0.0
|
| 221 |
+
if not r['format']:
|
| 222 |
+
return {**r, 'total': -0.1}
|
| 223 |
+
|
| 224 |
+
r['anchored'] = 1.0 if action_result['anchored'] else 0.3
|
| 225 |
+
|
| 226 |
+
interior = state.graph.interior_vertices()
|
| 227 |
+
n = max(len(interior), 1)
|
| 228 |
+
|
| 229 |
+
kaw = [check_kawasaki_at_vertex(v, state.graph) for v in interior]
|
| 230 |
+
mae = [check_maekawa_at_vertex(v, state.graph) for v in interior]
|
| 231 |
+
blb = [check_blb_at_vertex(v, state.graph) for v in interior]
|
| 232 |
+
|
| 233 |
+
r['kawasaki'] = sum(ok for ok, _ in kaw) / n
|
| 234 |
+
r['maekawa'] = sum(mae) / n
|
| 235 |
+
r['blb'] = 1.0 - sum(len(v) > 0 for v in blb) / n
|
| 236 |
+
|
| 237 |
+
coverage, economy = geometric_coverage(state, target['edges'])
|
| 238 |
+
r['progress'] = coverage
|
| 239 |
+
r['economy'] = economy
|
| 240 |
+
|
| 241 |
+
all_valid = (r['kawasaki'] == 1.0 and r['maekawa'] == 1.0 and r['blb'] == 1.0)
|
| 242 |
+
r['completion'] = 10.0 if (r['progress'] > 0.9 and all_valid) else 0.0
|
| 243 |
+
r['efficiency'] = -0.01
|
| 244 |
+
|
| 245 |
+
r['total'] = (
|
| 246 |
+
0.05 * r['anchored'] +
|
| 247 |
+
0.08 * r['kawasaki'] +
|
| 248 |
+
0.07 * r['maekawa'] +
|
| 249 |
+
0.05 * r['blb'] +
|
| 250 |
+
0.45 * r['progress'] +
|
| 251 |
+
0.10 * r['economy'] +
|
| 252 |
+
r['completion'] +
|
| 253 |
+
r['efficiency']
|
| 254 |
+
)
|
| 255 |
+
return r
|
| 256 |
+
```
|
| 257 |
+
|
| 258 |
+
### Phase 2 reward extension
|
| 259 |
+
|
| 260 |
+
When `fold_engine.py` is available, replace `progress` and `economy` with engineering metrics. No pre-specified target pattern required β the model optimizes objectives directly and can discover that Miura-ori is optimal.
|
| 261 |
+
|
| 262 |
+
```python
|
| 263 |
+
def compute_reward_phase2(state, action_result, folded_state) -> dict:
|
| 264 |
+
# ... same gates as phase 1 ...
|
| 265 |
+
|
| 266 |
+
r['deployment_ratio'] = compute_deployment_ratio(folded_state)
|
| 267 |
+
# = unfolded_area / folded_bounding_box_area
|
| 268 |
+
|
| 269 |
+
r['bbox_compactness'] = 1.0 - (folded_bbox_area / unfolded_area)
|
| 270 |
+
# higher = more compact fold
|
| 271 |
+
|
| 272 |
+
r['total'] = (
|
| 273 |
+
0.05 * r['anchored'] +
|
| 274 |
+
0.08 * r['kawasaki'] +
|
| 275 |
+
0.07 * r['maekawa'] +
|
| 276 |
+
0.05 * r['blb'] +
|
| 277 |
+
0.30 * r['deployment_ratio'] +
|
| 278 |
+
0.20 * r['bbox_compactness'] +
|
| 279 |
+
0.05 * r['economy'] +
|
| 280 |
+
r['completion'] +
|
| 281 |
+
r['efficiency']
|
| 282 |
+
)
|
| 283 |
+
return r
|
| 284 |
+
```
|
| 285 |
+
|
| 286 |
+
---
|
| 287 |
+
|
| 288 |
+
## Phase 4: Prompts (`env/prompts.py`)
|
| 289 |
+
|
| 290 |
+
### Code-as-policy prompt (training mode)
|
| 291 |
+
|
| 292 |
+
```
|
| 293 |
+
You are an origami designer. Generate a complete fold sequence for a unit square [0,1]x[0,1].
|
| 294 |
+
|
| 295 |
+
TARGET CREASE PATTERN:
|
| 296 |
+
Valley fold: (0.0, 0.5) -> (1.0, 0.5)
|
| 297 |
+
Mountain fold: (0.5, 0.0) -> (0.5, 1.0)
|
| 298 |
+
|
| 299 |
+
RULES (your sequence must satisfy at every interior vertex):
|
| 300 |
+
- Kawasaki: alternating sector angles sum equally (each half = 180 degrees)
|
| 301 |
+
- Maekawa: |mountain_count - valley_count| = 2
|
| 302 |
+
- Big-Little-Big: folds bounding the smallest sector must have opposite types
|
| 303 |
+
|
| 304 |
+
ANCHOR POINTS (valid fold endpoints):
|
| 305 |
+
Corners: (0,0) (1,0) (1,1) (0,1)
|
| 306 |
+
Midpoints: (0.5,0) (1,0.5) (0.5,1) (0,0.5)
|
| 307 |
+
Note: the square has 4-fold dihedral symmetry β symmetric fold sequences are equivalent.
|
| 308 |
+
|
| 309 |
+
Output a JSON list of fold operations in order. Both endpoints must be anchor points.
|
| 310 |
+
|
| 311 |
+
<folds>
|
| 312 |
+
[
|
| 313 |
+
{"instruction": "...", "from": [x1, y1], "to": [x2, y2], "assignment": "M"|"V"},
|
| 314 |
+
...
|
| 315 |
+
]
|
| 316 |
+
</folds>
|
| 317 |
+
```
|
| 318 |
+
|
| 319 |
+
### Step-level prompt (demo mode)
|
| 320 |
+
|
| 321 |
+
Same information, but shows only the current step's observation with prior fold history and last-step reward appended. Same model, different prompt wrapper.
|
| 322 |
+
|
| 323 |
+
```
|
| 324 |
+
... [same header] ...
|
| 325 |
+
|
| 326 |
+
CURRENT STATE (step 2 of 8):
|
| 327 |
+
Creases placed:
|
| 328 |
+
1. Mountain fold: (0.5, 0.0) -> (0.5, 1.0)
|
| 329 |
+
|
| 330 |
+
AVAILABLE ANCHOR POINTS:
|
| 331 |
+
Corners: (0.0,0.0) (1.0,0.0) (1.0,1.0) (0.0,1.0)
|
| 332 |
+
Edge midpoints:(0.5,0.0) (1.0,0.5) (0.5,1.0) (0.0,0.5)
|
| 333 |
+
Intersections: (0.5,0.5)
|
| 334 |
+
|
| 335 |
+
LAST REWARD: format=1.0 kawasaki=1.0 maekawa=1.0 blb=1.0 progress=0.32 total=0.33
|
| 336 |
+
|
| 337 |
+
Add the next crease. Output JSON only:
|
| 338 |
+
{"instruction": "...", "from": [x1, y1], "to": [x2, y2], "assignment": "M"|"V"}
|
| 339 |
+
```
|
| 340 |
+
|
| 341 |
+
---
|
| 342 |
+
|
| 343 |
+
## Phase 5: Target Files + Validator (`env/targets/`)
|
| 344 |
+
|
| 345 |
+
Targets are hand-authored `.fold` JSON. Before any target enters training, `validator.py` runs:
|
| 346 |
+
|
| 347 |
+
1. Parse FOLD JSON, reconstruct the CreaseGraph
|
| 348 |
+
2. For each interior vertex: even-degree β Kawasaki β Maekawa β BLB
|
| 349 |
+
3. Enumerate at least one valid MV assignment via the crimp algorithm
|
| 350 |
+
4. Fail loudly with vertex + violation details if any check fails
|
| 351 |
+
|
| 352 |
+
**Target set:**
|
| 353 |
+
|
| 354 |
+
| File | Creases | Level | Interior vertices |
|
| 355 |
+
|------|---------|-------|-------------------|
|
| 356 |
+
| `half_horizontal.fold` | 1 | 1 | 0 |
|
| 357 |
+
| `half_vertical.fold` | 1 | 1 | 0 |
|
| 358 |
+
| `diagonal.fold` | 1 | 1 | 0 |
|
| 359 |
+
| `cross_fold.fold` | 2 | 2 | 1 (degree 4) |
|
| 360 |
+
| `x_fold.fold` | 2 | 2 | 1 (degree 4) |
|
| 361 |
+
| `pinwheel_base.fold` | 4 | 2 | 4 |
|
| 362 |
+
| `preliminary_base.fold` | 4 | 3 | 4 |
|
| 363 |
+
| `fish_base.fold` | 6 | 3 | 6 |
|
| 364 |
+
|
| 365 |
+
Level 1 targets have zero interior vertices β Kawasaki/Maekawa are vacuously satisfied, the only reward signal is `progress`. The model learns to place geometrically correct folds before worrying about vertex constraints.
|
| 366 |
+
|
| 367 |
+
---
|
| 368 |
+
|
| 369 |
+
## Phase 6: OpenEnv Wrapper (`env/environment.py`)
|
| 370 |
+
|
| 371 |
+
Both modes supported. The `info` dict explicitly labels what is and isn't checked.
|
| 372 |
+
|
| 373 |
+
```python
|
| 374 |
+
class OrigamiEnvironment(Environment):
|
| 375 |
+
|
| 376 |
+
async def step(self, action):
|
| 377 |
+
if isinstance(action, list):
|
| 378 |
+
return self._execute_sequence(action) # code-as-policy
|
| 379 |
+
else:
|
| 380 |
+
return self._execute_single(action) # step mode
|
| 381 |
+
|
| 382 |
+
def _execute_sequence(self, folds):
|
| 383 |
+
for fold in folds:
|
| 384 |
+
result = self.paper.add_crease(
|
| 385 |
+
fold['from'], fold['to'], fold['assignment']
|
| 386 |
+
)
|
| 387 |
+
if not result['valid']:
|
| 388 |
+
break # partial credit: reward up to failure point
|
| 389 |
+
reward = compute_reward_phase1(self.paper, result, self.target)
|
| 390 |
+
return self._get_observation(), reward, True, self._info()
|
| 391 |
+
|
| 392 |
+
def _info(self):
|
| 393 |
+
interior = self.paper.graph.interior_vertices()
|
| 394 |
+
return {
|
| 395 |
+
'local_foldability': all(
|
| 396 |
+
check_kawasaki_at_vertex(v, self.paper.graph)[0] and
|
| 397 |
+
check_maekawa_at_vertex(v, self.paper.graph)
|
| 398 |
+
for v in interior
|
| 399 |
+
),
|
| 400 |
+
'blb_satisfied': all(
|
| 401 |
+
len(check_blb_at_vertex(v, self.paper.graph)) == 0
|
| 402 |
+
for v in interior
|
| 403 |
+
),
|
| 404 |
+
'global_foldability': 'not_checked', # NP-complete (Bern-Hayes 1996)
|
| 405 |
+
'n_interior_vertices': len(interior),
|
| 406 |
+
}
|
| 407 |
+
```
|
| 408 |
+
|
| 409 |
+
---
|
| 410 |
+
|
| 411 |
+
## Phase 7: Training Script (`train.py`)
|
| 412 |
+
|
| 413 |
+
Code-as-policy GRPO. Each completion is a complete fold sequence. N=8 completions per prompt evaluated in parallel, each with its own fresh `PaperState`. Terminal reward only.
|
| 414 |
+
|
| 415 |
+
```python
|
| 416 |
+
def origami_reward_fn(completions, prompts, targets):
|
| 417 |
+
rewards = []
|
| 418 |
+
for completion, target in zip(completions, targets):
|
| 419 |
+
try:
|
| 420 |
+
folds = parse_fold_list(completion) # extract JSON from <folds> tags
|
| 421 |
+
paper = PaperState()
|
| 422 |
+
for fold in folds:
|
| 423 |
+
paper.add_crease(fold['from'], fold['to'], fold['assignment'])
|
| 424 |
+
r = compute_reward_phase1(paper, {'valid': True, 'anchored': True}, target)
|
| 425 |
+
rewards.append(r['total'])
|
| 426 |
+
except Exception:
|
| 427 |
+
rewards.append(-0.1)
|
| 428 |
+
return rewards
|
| 429 |
+
```
|
| 430 |
+
|
| 431 |
+
Log all reward components separately (kawasaki, maekawa, blb, progress, economy) β the decomposed curves are the demo artifact showing the model learning to satisfy geometric constraints.
|
| 432 |
+
|
| 433 |
+
---
|
| 434 |
+
|
| 435 |
+
## Phase 8: Fold Engine / Phase 2 (`env/fold_engine.py`)
|
| 436 |
+
|
| 437 |
+
For flat-folded patterns (all creases at 180Β°), the folded bounding box is computable from crease pattern + simplified layer assignment. For Level 1-3 targets the layer assignment is tractable (polynomial for single-vertex, and our simple patterns have at most a few interior vertices).
|
| 438 |
+
|
| 439 |
+
Apply fold angles via reflection transforms, project to get 2D bounding box of the folded state, compute:
|
| 440 |
+
|
| 441 |
+
```
|
| 442 |
+
deployment_ratio = 1.0 / (folded_bbox_area / unfolded_area)
|
| 443 |
+
```
|
| 444 |
+
|
| 445 |
+
Higher = more compact = better engineering. With this signal the model can discover optimal fold patterns (Miura-ori, accordion folds) without a pre-specified target.
|
| 446 |
+
|
| 447 |
+
---
|
| 448 |
+
|
| 449 |
+
## Build Order
|
| 450 |
+
|
| 451 |
+
```
|
| 452 |
+
[ ] 1. requirements.txt (shapely, numpy, pytest)
|
| 453 |
+
[ ] 2. env/graph.py β CreaseGraph with cyclic ordering, split_edge
|
| 454 |
+
[ ] 3. Unit test: two crossing creases -> 1 interior vertex of degree 4, correct cyclic order
|
| 455 |
+
[ ] 4. env/paper_state.py β PaperState.add_crease with intersection handling
|
| 456 |
+
[ ] 5. env/verifier.py β even-degree, Kawasaki, Maekawa, BLB, geometric_coverage
|
| 457 |
+
[ ] 6. Unit test: degree-4 vertex with known valid/invalid angles -> Kawasaki pass/fail
|
| 458 |
+
[ ] 7. Unit test: single crease -> zero interior vertices -> verifiers return defaults (True)
|
| 459 |
+
[ ] 8. Unit test: excess crease penalty activates correctly
|
| 460 |
+
[ ] 9. targets/validator.py β crimp-check routine
|
| 461 |
+
[ ] 10. env/targets/*.fold β 4 Level 1 + 4 Level 2 targets, all passing validator
|
| 462 |
+
[ ] 11. env/rewards.py β Phase 1 compute_reward
|
| 463 |
+
[ ] 12. env/prompts.py β code-as-policy prompt + step-level prompt
|
| 464 |
+
[ ] 13. env/environment.py β both sequence and step modes + info dict
|
| 465 |
+
[ ] 14. Integration test: known valid sequence on half_horizontal, reward >= 0.9
|
| 466 |
+
[ ] 15. Integration test: invalid MV assignment on cross_fold, BLB fires
|
| 467 |
+
[ ] 16. train.py β GRPO with code-as-policy reward fn
|
| 468 |
+
[ ] 17. First training run on Level 1 targets, log all reward components to W&B
|
| 469 |
+
[ ] 18. env/fold_engine.py β Phase 2: fold angles -> 3D state -> deployment ratio
|
| 470 |
+
[ ] 19. Visualizer (React): render crease graph from FOLD JSON, animate fold history
|
| 471 |
+
```
|
| 472 |
+
|
| 473 |
+
Steps 2-3 and 5-8 are highest risk. Get the graph data structure and cyclic Kawasaki check correct before building anything on top of them. Steps 14-15 are the checkpoint before touching the training script.
|
| 474 |
+
|
| 475 |
+
---
|
| 476 |
+
|
| 477 |
+
## Key Risks
|
| 478 |
+
|
| 479 |
+
| Risk | Likelihood | Mitigation |
|
| 480 |
+
|------|-----------|------------|
|
| 481 |
+
| Cyclic sector angle computation incorrect | High | Explicit unit tests with known valid/invalid patterns |
|
| 482 |
+
| Level 3+ action space too large to learn | Medium | Dihedral symmetry hints in prompt; hard masking if no convergence after 500 steps |
|
| 483 |
+
| GRPO reward signal too sparse (no interior vertices on Level 1) | Medium | Level 1 reward is purely `progress`; works without vertex constraints |
|
| 484 |
+
| fold_engine Phase 2 infeasible in hackathon time | Medium | Phase 1 ships independently; Phase 2 is an extension |
|
| 485 |
+
| Layer ordering required for deployment ratio on complex patterns | Low | Level 1-3 patterns are tractable; flag NP-hardness in info dict |
|
pyproject.toml
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[build-system]
|
| 2 |
+
requires = ["hatchling>=1.25.0"]
|
| 3 |
+
build-backend = "hatchling.build"
|
| 4 |
+
|
| 5 |
+
[project]
|
| 6 |
+
name = "optigami"
|
| 7 |
+
version = "0.1.0"
|
| 8 |
+
description = "Optigami OpenEnv origami environment"
|
| 9 |
+
readme = "README.md"
|
| 10 |
+
requires-python = ">=3.10"
|
| 11 |
+
dependencies = [
|
| 12 |
+
"fastapi>=0.100.0",
|
| 13 |
+
"numpy>=1.24.0",
|
| 14 |
+
"openenv-core[core]>=0.2.1",
|
| 15 |
+
"pydantic>=2.0.0",
|
| 16 |
+
"shapely>=2.0.0",
|
| 17 |
+
]
|
| 18 |
+
|
| 19 |
+
[tool.pytest.ini_options]
|
| 20 |
+
pythonpath = ["."]
|
requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
shapely>=2.0.0
|
| 2 |
+
numpy>=1.24.0
|
| 3 |
+
fastapi>=0.100.0
|
| 4 |
+
uvicorn>=0.23.0
|
| 5 |
+
pydantic>=2.0.0
|