ajthor commited on
Commit
a2efc8e
·
verified ·
1 Parent(s): af218bf

Upload folder using huggingface_hub

Browse files
.devcontainer/devcontainer.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // For format details, see https://aka.ms/devcontainer.json. For config options, see the
2
+ // README at: https://github.com/devcontainers/templates/tree/main/src/python
3
+ {
4
+ "name": "Python 3 - Default",
5
+ // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
6
+ "image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye",
7
+
8
+ // Features to add to the dev container. More info: https://containers.dev/features.
9
+ "features": {
10
+ "ghcr.io/devcontainers/features/node:1": {}
11
+ },
12
+
13
+ // Use 'forwardPorts' to make a list of ports inside the container available locally.
14
+ // "forwardPorts": [],
15
+
16
+ // Use 'postCreateCommand' to run commands after the container is created.
17
+ "postCreateCommand": "pip3 install -r requirements.txt -r .devcontainer/requirements.jax.txt",
18
+
19
+ // Configure tool-specific properties.
20
+ "customizations": {
21
+ "vscode": {
22
+ "extensions":[
23
+ "ms-python.python",
24
+ "ms-python.vscode-pylance"
25
+ ]
26
+ }
27
+ }
28
+
29
+ // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
30
+ // "remoteUser": "root"
31
+ }
.devcontainer/requirements.jax.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ jax
2
+ jaxlib
3
+ optax
4
+ orbax
.gitignore ADDED
@@ -0,0 +1,250 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Created by https://www.toptal.com/developers/gitignore/api/linux,macos,windows,python
2
+ # Edit at https://www.toptal.com/developers/gitignore?templates=linux,macos,windows,python
3
+
4
+ ### Linux ###
5
+ *~
6
+
7
+ # temporary files which can be created if a process still has a handle open of a deleted file
8
+ .fuse_hidden*
9
+
10
+ # KDE directory preferences
11
+ .directory
12
+
13
+ # Linux trash folder which might appear on any partition or disk
14
+ .Trash-*
15
+
16
+ # .nfs files are created when an open file is removed but is still being accessed
17
+ .nfs*
18
+
19
+ ### macOS ###
20
+ # General
21
+ .DS_Store
22
+ .AppleDouble
23
+ .LSOverride
24
+
25
+ # Icon must end with two \r
26
+ Icon
27
+
28
+
29
+ # Thumbnails
30
+ ._*
31
+
32
+ # Files that might appear in the root of a volume
33
+ .DocumentRevisions-V100
34
+ .fseventsd
35
+ .Spotlight-V100
36
+ .TemporaryItems
37
+ .Trashes
38
+ .VolumeIcon.icns
39
+ .com.apple.timemachine.donotpresent
40
+
41
+ # Directories potentially created on remote AFP share
42
+ .AppleDB
43
+ .AppleDesktop
44
+ Network Trash Folder
45
+ Temporary Items
46
+ .apdisk
47
+
48
+ ### macOS Patch ###
49
+ # iCloud generated files
50
+ *.icloud
51
+
52
+ ### Python ###
53
+ # Byte-compiled / optimized / DLL files
54
+ __pycache__/
55
+ *.py[cod]
56
+ *$py.class
57
+
58
+ # C extensions
59
+ *.so
60
+
61
+ # Distribution / packaging
62
+ .Python
63
+ build/
64
+ develop-eggs/
65
+ dist/
66
+ downloads/
67
+ eggs/
68
+ .eggs/
69
+ lib/
70
+ lib64/
71
+ parts/
72
+ sdist/
73
+ var/
74
+ wheels/
75
+ share/python-wheels/
76
+ *.egg-info/
77
+ .installed.cfg
78
+ *.egg
79
+ MANIFEST
80
+
81
+ # PyInstaller
82
+ # Usually these files are written by a python script from a template
83
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
84
+ *.manifest
85
+ *.spec
86
+
87
+ # Installer logs
88
+ pip-log.txt
89
+ pip-delete-this-directory.txt
90
+
91
+ # Unit test / coverage reports
92
+ htmlcov/
93
+ .tox/
94
+ .nox/
95
+ .coverage
96
+ .coverage.*
97
+ .cache
98
+ nosetests.xml
99
+ coverage.xml
100
+ *.cover
101
+ *.py,cover
102
+ .hypothesis/
103
+ .pytest_cache/
104
+ cover/
105
+
106
+ # Translations
107
+ *.mo
108
+ *.pot
109
+
110
+ # Django stuff:
111
+ *.log
112
+ local_settings.py
113
+ db.sqlite3
114
+ db.sqlite3-journal
115
+
116
+ # Flask stuff:
117
+ instance/
118
+ .webassets-cache
119
+
120
+ # Scrapy stuff:
121
+ .scrapy
122
+
123
+ # Sphinx documentation
124
+ docs/_build/
125
+
126
+ # PyBuilder
127
+ .pybuilder/
128
+ target/
129
+
130
+ # Jupyter Notebook
131
+ .ipynb_checkpoints
132
+
133
+ # IPython
134
+ profile_default/
135
+ ipython_config.py
136
+
137
+ # pyenv
138
+ # For a library or package, you might want to ignore these files since the code is
139
+ # intended to run in multiple environments; otherwise, check them in:
140
+ # .python-version
141
+
142
+ # pipenv
143
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
144
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
145
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
146
+ # install all needed dependencies.
147
+ #Pipfile.lock
148
+
149
+ # poetry
150
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
151
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
152
+ # commonly ignored for libraries.
153
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
154
+ #poetry.lock
155
+
156
+ # pdm
157
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
158
+ #pdm.lock
159
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
160
+ # in version control.
161
+ # https://pdm.fming.dev/#use-with-ide
162
+ .pdm.toml
163
+
164
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
165
+ __pypackages__/
166
+
167
+ # Celery stuff
168
+ celerybeat-schedule
169
+ celerybeat.pid
170
+
171
+ # SageMath parsed files
172
+ *.sage.py
173
+
174
+ # Environments
175
+ .env
176
+ .venv
177
+ env/
178
+ venv/
179
+ ENV/
180
+ env.bak/
181
+ venv.bak/
182
+
183
+ # Spyder project settings
184
+ .spyderproject
185
+ .spyproject
186
+
187
+ # Rope project settings
188
+ .ropeproject
189
+
190
+ # mkdocs documentation
191
+ /site
192
+
193
+ # mypy
194
+ .mypy_cache/
195
+ .dmypy.json
196
+ dmypy.json
197
+
198
+ # Pyre type checker
199
+ .pyre/
200
+
201
+ # pytype static type analyzer
202
+ .pytype/
203
+
204
+ # Cython debug symbols
205
+ cython_debug/
206
+
207
+ # PyCharm
208
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
209
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
210
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
211
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
212
+ #.idea/
213
+
214
+ ### Python Patch ###
215
+ # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
216
+ poetry.toml
217
+
218
+ # ruff
219
+ .ruff_cache/
220
+
221
+ # LSP config files
222
+ pyrightconfig.json
223
+
224
+ ### Windows ###
225
+ # Windows thumbnail cache files
226
+ Thumbs.db
227
+ Thumbs.db:encryptable
228
+ ehthumbs.db
229
+ ehthumbs_vista.db
230
+
231
+ # Dump file
232
+ *.stackdump
233
+
234
+ # Folder config file
235
+ [Dd]esktop.ini
236
+
237
+ # Recycle Bin used on file shares
238
+ $RECYCLE.BIN/
239
+
240
+ # Windows Installer files
241
+ *.cab
242
+ *.msi
243
+ *.msix
244
+ *.msm
245
+ *.msp
246
+
247
+ # Windows shortcuts
248
+ *.lnk
249
+
250
+ # End of https://www.toptal.com/developers/gitignore/api/linux,macos,windows,python
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Adam Thorpe
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Chladni Plate 2D Dataset
2
+
3
+ Numerical solutions to the 2D Chladni plate vibration equation.
4
+
5
+ ![Sample Plot](sample_plot.png)
6
+
7
+ ## Equation
8
+
9
+ The Chladni plate dataset models the steady-state response of a 2D vibrating plate to various forcing patterns. The mathematical formulation involves modal decomposition using cosine basis functions:
10
+
11
+ **Forcing function**:
12
+ ```
13
+ S(x,y) = Σₙ Σₘ α(n,m) cos(μₙx) cos(λₘy)
14
+ ```
15
+
16
+ **Displacement response**:
17
+ ```
18
+ Z(x,y) = Σₙ Σₘ α(n,m) Φ(n,m) cos(μₙx) cos(λₘy)
19
+ ```
20
+
21
+ **Mode factor**:
22
+ ```
23
+ Φ(n,m) = (v²/β(n,m)) × I(n,m) × (4/(LM)) × cos(μₙL/2)cos(λₘM/2)
24
+ ```
25
+
26
+ Where:
27
+ - μₙ = nπ/L, λₘ = mπ/M are spatial wavenumbers
28
+ - β(n,m) = √(μₙ² + λₘ² + 3v² - γ⁴)
29
+ - I(n,m) is a time integral: ∫₀ᵗ sin(ω(τ-t)) exp(-γ²+v²τ) sin(β(n,m)τ) dτ
30
+
31
+ ## Variables
32
+
33
+ The dataset returns a dictionary with the following fields:
34
+
35
+ ### Coordinates
36
+ - `spatial_coordinates`: `(numPoints², 2)` - Array of (x, y) coordinate pairs
37
+ - `X`: `(numPoints,)` - 1D array of x coordinates
38
+ - `Y`: `(numPoints,)` - 1D array of y coordinates
39
+
40
+ ### Solution Fields
41
+ - `forcing`: `(numPoints, numPoints)` - 2D forcing function S(x,y)
42
+ - `displacement`: `(numPoints, numPoints)` - 2D displacement response Z(x,y)
43
+ - `S`: `(numPoints²,)` - Flattened forcing function
44
+ - `Z`: `(numPoints²,)` - Flattened displacement response
45
+
46
+ ### Model Coefficients
47
+ - `alpha_coefficients`: `(n_range, m_range)` - Random forcing coefficients α(n,m)
48
+ - `alpha`: `(n_range × m_range,)` - Flattened coefficients
49
+
50
+ ### Physical Parameters
51
+ - `omega`: Angular frequency (rad/s)
52
+ - `frequency`: Driving frequency (Hz)
53
+ - `plate_length_x`: Plate length in x-direction (m)
54
+ - `plate_length_y`: Plate length in y-direction (m)
55
+ - `damping`: Damping parameter γ
56
+ - `velocity_param`: Velocity parameter v
57
+ - `evaluation_time`: Time at which solution is evaluated
58
+
59
+ ### Grid Parameters
60
+ - `grid_points`: Number of spatial grid points per dimension
61
+ - `n_modes`: Number of modes in x-direction
62
+ - `m_modes`: Number of modes in y-direction
63
+
64
+ ## Dataset Parameters
65
+
66
+ - **Domain**: [0, L] × [0, M] where L = M = 8.75 × 0.0254 m (square plate)
67
+ - **Grid points**: 50 × 50 (default)
68
+ - **Spatial resolution**: L/(numPoints-1) ≈ 4.5 mm
69
+ - **Mode range**: 10 × 10 modes (default)
70
+
71
+ ### Physical Parameters
72
+ - **Plate dimensions**: L = M = 8.75 × 0.0254 m ≈ 0.222 m
73
+ - **Driving frequency**: ω = 55π/M ≈ 778 rad/s (≈ 124 Hz)
74
+ - **Damping coefficient**: γ = 0.02
75
+ - **Velocity parameter**: v = 0.5
76
+ - **Evaluation time**: t = 4 s
77
+ - **Boundary conditions**: Free boundaries (cosine modes)
78
+
79
+ ## Physical Context
80
+
81
+ This dataset simulates the vibration patterns of a Chladni plate, a thin elastic plate that exhibits complex standing wave patterns when driven by acoustic forcing. The equation models the steady-state displacement response of the plate to various spatial forcing distributions.
82
+
83
+ Chladni plates are famous for creating beautiful geometric patterns (Chladni figures) when sand or powder is placed on the vibrating surface. The sand accumulates at nodal lines where the displacement is minimal, revealing the underlying mode shapes of the plate vibration.
84
+
85
+ This dataset is relevant for:
86
+ - Structural vibration analysis
87
+ - Acoustic wave propagation studies
88
+ - Modal analysis and system identification
89
+ - Pattern formation in physical systems
90
+ - Inverse problems in vibration engineering
91
+
92
+ The forcing-response relationship captured in this dataset allows for learning the complex mapping between spatial excitation patterns and the resulting displacement fields.
93
+
94
+ ## Usage
95
+
96
+ ```python
97
+ from dataset import Chladni2DDataset
98
+
99
+ # Create dataset
100
+ dataset = Chladni2DDataset(numPoints=50, n_range=10, m_range=10)
101
+
102
+ # Generate a sample
103
+ sample = next(iter(dataset))
104
+
105
+ # Access solution data
106
+ spatial_coords = sample["spatial_coordinates"]
107
+ forcing = sample["forcing"]
108
+ displacement = sample["displacement"]
109
+ frequency = sample["frequency"]
110
+ ```
111
+
112
+ ## Visualization
113
+
114
+ Run the plotting script to visualize samples:
115
+
116
+ ```bash
117
+ python plot_sample.py # Static visualization with imshow plots
118
+ ```
119
+
120
+ Note: Animation is not applicable for this dataset as it generates steady-state responses rather than time evolution.
121
+
122
+ ## Data Generation
123
+
124
+ Generate the full dataset:
125
+
126
+ ```bash
127
+ python generate_data.py
128
+ ```
129
+
130
+ This creates train/test splits saved as chunked parquet files in the `data/` directory.
data/test-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98abdfce50dd9e12ada75455439d69d5b771d15d30fda05cab490ec304ba0877
3
+ size 76902485
data/test-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:154b07124f9710c08c88382b81727cc09dfe2a5b7470c2f0e1b52d1d1c006c18
3
+ size 76829870
data/train-00000-of-00010.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:60bd5f40c888eb4d8d20cedc2d31d76f764202fc6390b5532b8e57e3d50d22f4
3
+ size 76965698
data/train-00001-of-00010.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a99f3b00e0ddfa14f5fa3d5d3600655df619b4e2c06455e965c1f255c7fb28a5
3
+ size 76969096
data/train-00002-of-00010.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e228482fb7687f4df4ce79093cf278f856794250e09898e2396dbc7d190c7edd
3
+ size 77012417
data/train-00003-of-00010.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0cd796ee3d25417f6037e0f26373e1d08c83fc8204d482fc1e402184d54d197e
3
+ size 76865171
data/train-00004-of-00010.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c465a560206aa0c7a09c0dcf1ec459af83f44260ddc5bca39766011e2d0f2ae
3
+ size 76996442
data/train-00005-of-00010.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9a2c1b9c54962311911f65e26e2875b692c2878392afc4f70f4f24242f1459a
3
+ size 76914573
data/train-00006-of-00010.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef476917ac38b0301968f216d151fddf355373d889a9afd3bf4a79f04387aa39
3
+ size 76855112
data/train-00007-of-00010.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d748794c1b83c2721c1fb789798a884da7e70f3d779edfa1c27c9fc6b323cb7
3
+ size 76828512
data/train-00008-of-00010.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c7de2e7db822a8045da5ead19fc5d98ffa4d062d8f576835743f49371ca093d
3
+ size 76986430
data/train-00009-of-00010.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55caa8d6496384f64bda833d3305ec0ffabff4f5362e3e9f359f89a9d4d8251f
3
+ size 76855757
dataset.py ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Chladni Plate 2D Dataset - Generate forcing samples and displacement responses
3
+ Uses PyTorch IterableDataset for on-demand sample generation.
4
+
5
+ This dataset models the 2D Chladni plate vibration equation, which describes
6
+ the displacement response of a vibrating plate to various forcing patterns.
7
+ The equation governs the steady-state response of a damped, driven plate.
8
+
9
+ Mathematical formulation:
10
+ - Forcing function: S(x,y) = Σ Σ α(n,m) cos(μₙx) cos(λₘy)
11
+ - Displacement response: Z(x,y) = Σ Σ α(n,m) Φ(n,m) cos(μₙx) cos(λₘy)
12
+ - Mode factor: Φ(n,m) = (v²/β(n,m)) * I(n,m) * (4/(LM)) * cos(μₙL/2)cos(λₘM/2)
13
+
14
+ Where:
15
+ - μₙ = nπ/L, λₘ = mπ/M are spatial wavenumbers
16
+ - β(n,m) = √(μₙ² + λₘ² + 3v² - γ⁴)
17
+ - I(n,m) is a time integral involving the forcing frequency ω
18
+ """
19
+
20
+ import numpy as np
21
+ from torch.utils.data import IterableDataset
22
+ from scipy import integrate
23
+ import logging
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+
28
class Chladni2DDataset(IterableDataset):
    """
    Dataset for 2D Chladni plate vibration simulations.

    Generates random forcing patterns and computes the corresponding
    displacement responses using analytical mode decomposition. Each sample
    consists of a random linear combination of cosine modes for the forcing
    function and the resulting steady-state displacement response.

    The dataset implements the analytical solution to the 2D Chladni plate
    equation with damping and frequency-dependent forcing. This allows for
    efficient generation of large numbers of forcing-response pairs without
    numerical time integration.

    Args:
        numPoints (int): Number of grid points in each spatial dimension (default: 50)
        n_range (int): Number of modes in x-direction (default: 10)
        m_range (int): Number of modes in y-direction (default: 10)
        L (float): Plate length in x-direction in meters (default: 8.75 * 0.0254 m)
        M (float): Plate length in y-direction in meters (default: 8.75 * 0.0254 m)
        omega_factor (float): Frequency scaling factor; omega = omega_factor * pi / M (default: 55)
        t_fixed (float): Time at which to evaluate solution in seconds (default: 4)
        gamma (float): Damping parameter (default: 0.02)
        v (float): Velocity parameter (default: 0.5)

    Example:
        >>> dataset = Chladni2DDataset(numPoints=50)
        >>> sample = next(iter(dataset))
        >>> forcing = sample["forcing"]  # 2D forcing pattern
        >>> displacement = sample["displacement"]  # 2D displacement response
    """

    def __init__(
        self,
        numPoints=50,
        n_range=10,
        m_range=10,
        L=8.75 * 0.0254,
        M=8.75 * 0.0254,
        omega_factor=55,
        t_fixed=4,
        gamma=0.02,
        v=0.5,
    ):
        super().__init__()

        # Store parameters
        self.numPoints = numPoints
        self.n_range = n_range
        self.m_range = m_range
        self.L = L
        self.M = M
        # Driving angular frequency (rad/s), scaled by the plate length M.
        self.omega = omega_factor * np.pi / M
        self.t_fixed = t_fixed
        self.gamma = gamma
        self.v = v

        # Create spatial grid over [0, L] x [0, M]
        self.x = np.linspace(0, L, numPoints)
        self.y = np.linspace(0, M, numPoints)

        # Flattened (x, y) coordinate pairs, shape (numPoints^2, 2).
        # indexing='ij' so row index varies with x, column index with y.
        X, Y = np.meshgrid(self.x, self.y, indexing='ij')
        self.coordinates = np.column_stack((X.ravel(), Y.ravel()))

        # Precompute all terms that don't depend on alpha
        self._precompute_terms()

    def _precompute_terms(self):
        """
        Precompute all terms that don't depend on alpha coefficients.

        This method calculates and stores the spatial mode shapes, temporal
        integration factors, and mode coefficients that are independent of
        the random forcing coefficients. This optimization allows for
        efficient sample generation by avoiding repeated computation.

        Precomputed terms include:
        - Spatial wavenumbers mu(n) and lambda(m)
        - Cosine mode shapes in x and y directions
        - Center factors cos(mu_n*L/2)*cos(lam_m*M/2)
        - Modal frequencies beta(n,m)
        - Time integrals I(n,m)
        - Combined mode factors Phi(n,m)
        """
        # Wave numbers mu(n) = n*pi/L, lambda(m) = m*pi/M for n, m >= 1
        self.mu_vals = np.arange(1, self.n_range + 1) * np.pi / self.L
        self.lam_vals = np.arange(1, self.m_range + 1) * np.pi / self.M

        # cosX(n,i) = cos( mu_vals(n) * x(i) ) - vectorized
        self.cosX = np.cos(self.mu_vals[:, np.newaxis] * self.x[np.newaxis, :])

        # cosY(m,j) = cos( lam_vals(m) * y(j) ) - vectorized
        self.cosY = np.cos(self.lam_vals[:, np.newaxis] * self.y[np.newaxis, :])

        # centerFactor(n,m) = cos(mu_n*(L/2)) * cos(lam_m*(M/2)) - vectorized
        self.centerFactor = np.cos(self.mu_vals[:, np.newaxis] * (self.L / 2)) * np.cos(
            self.lam_vals[np.newaxis, :] * (self.M / 2)
        )

        # beta(n,m) = sqrt(mu^2 + lam^2 + 3*v^2 - gamma^4) - vectorized.
        # Clamped below at 1e-12 to avoid sqrt of a negative and a later
        # division by zero in the mode factor.
        beta_squared = (
            self.mu_vals[:, np.newaxis] ** 2
            + self.lam_vals[np.newaxis, :] ** 2
            + 3 * self.v**2
            - self.gamma**4
        )
        self.beta_nm = np.sqrt(np.maximum(beta_squared, 1e-12))

        # timeInt(n,m): I(n,m) = integral over tau in [0, t_fixed] of
        # sin(omega*(tau - t)) * exp(-gamma^2 + v^2*tau) * sin(beta*tau).
        # NOTE(review): the exponent mirrors the module docstring's
        # exp(-gamma^2 + v^2*tau); whether it should instead be
        # exp((-gamma^2 + v^2)*tau) is not determinable from this file —
        # confirm against the derivation before changing.
        self.timeInt = np.zeros((self.n_range, self.m_range))
        for n in range(self.n_range):
            for m in range(self.m_range):
                current_beta = self.beta_nm[n, m]

                def integrand(tau):
                    return (
                        np.sin(self.omega * (tau - self.t_fixed))
                        * np.exp(-self.gamma**2 + self.v**2 * tau)
                        * np.sin(current_beta * tau)
                    )

                try:
                    self.timeInt[n, m], _ = integrate.quad(
                        integrand, 0, self.t_fixed,
                        limit=100,  # Increase subdivision limit
                        epsabs=1e-8, epsrel=1e-8  # Set tolerance
                    )
                except integrate.IntegrationWarning:
                    # NOTE: quad *emits* IntegrationWarning via the warnings
                    # machinery rather than raising it, so this branch only
                    # fires if warnings are promoted to errors. Use a
                    # deterministic fallback (the previous random value made
                    # precomputation non-reproducible).
                    self.timeInt[n, m] = 0.0

        # modeFactor(n,m) = (v^2/beta) * I(n,m) * 4/(L*M) * centerFactor,
        # computed elementwise over the whole (n_range, m_range) grid at once
        # (identical math to the per-element loop, just vectorized).
        self.modeFactor = (
            (self.v**2 / self.beta_nm)
            * self.timeInt
            * (4 / (self.L * self.M))
            * self.centerFactor
        )

    def __iter__(self):
        """
        Generate infinite samples from the dataset.

        Each iteration generates a new random forcing pattern by sampling
        Gaussian-distributed coefficients for the modal expansion, then
        computes the corresponding displacement response using the
        precomputed mode factors.

        Yields:
            dict: Sample dictionary containing forcing patterns, displacement
                  responses, spatial coordinates, and physical parameters.
        """
        while True:
            # Random Gaussian modal coefficients, scaled to keep forcing small
            alpha_k = 0.01 * np.random.randn(self.n_range, self.m_range)

            # Solve for forcing and displacement
            yield self.solve(alpha_k)

    def solve(self, alpha_k):
        """
        Compute forcing S and displacement Z from alpha coefficients.

        Given a set of modal coefficients, this method reconstructs the
        spatial forcing pattern and computes the corresponding steady-state
        displacement response using the analytical solution.

        Args:
            alpha_k (np.ndarray): Forcing coefficients with shape
                                  (n_range, m_range). These coefficients
                                  weight the contribution of each (n,m) mode.

        Returns:
            dict: Comprehensive sample dictionary containing:
                - Spatial coordinates and grid information
                - 2D forcing and displacement fields
                - Flattened versions for compatibility
                - Modal coefficients and physical parameters
                - Grid and solver metadata
        """
        # Vectorized computation using matrix operations
        # S_k[i,j] = sum_n sum_m alpha_k[n,m] * cosX[n,i] * cosY[m,j]
        S_k = self.cosX.T @ alpha_k @ self.cosY

        # Z_k[i,j] = sum_n sum_m alpha_k[n,m] * modeFactor[n,m] * cosX[n,i] * cosY[m,j]
        Z_k = self.cosX.T @ (alpha_k * self.modeFactor) @ self.cosY

        return {
            # Spatial coordinates in consistent format
            "spatial_coordinates": self.coordinates,  # Shape: (numPoints^2, 2)
            "X": self.x,  # 1D array of x coordinates
            "Y": self.y,  # 1D array of y coordinates

            # Forcing and response fields
            "forcing": S_k,  # 2D forcing function, shape (numPoints, numPoints)
            "displacement": Z_k,  # 2D displacement response, shape (numPoints, numPoints)

            # Flattened versions for compatibility
            "S": S_k.flatten(),  # Flattened forcing
            "Z": Z_k.flatten(),  # Flattened displacement

            # Model coefficients and parameters
            "alpha_coefficients": alpha_k,  # Original coefficients, shape (n_range, m_range)
            "alpha": alpha_k.flatten(),  # Flattened coefficients

            # Physical parameters
            "omega": self.omega,
            "frequency": self.omega / (2 * np.pi),  # Hz
            "plate_length_x": self.L,
            "plate_length_y": self.M,
            "damping": self.gamma,
            "velocity_param": self.v,
            "evaluation_time": self.t_fixed,

            # Grid parameters
            "grid_points": self.numPoints,
            "n_modes": self.n_range,
            "m_modes": self.m_range,
        }
generate_data.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Generate Chladni plate dataset and save to parquet files in chunks.
4
+ """
5
+
6
+ import os
7
+ import numpy as np
8
+ import pyarrow as pa
9
+ import pyarrow.parquet as pq
10
+ from dataset import Chladni2DDataset
11
+
12
+
13
def generate_dataset_split(
    split_name="train", num_samples=1000, chunk_size=100, output_dir="data"
):
    """Generate a dataset split and write it out as chunked parquet files.

    Samples are drawn one at a time from Chladni2DDataset, buffered in
    columnar form, and flushed to disk every `chunk_size` samples (the
    final chunk may be smaller). Files follow the Hugging Face naming
    scheme `{split}-{chunk:05d}-of-{total:05d}.parquet`.

    Args:
        split_name (str): Name of the split ("train" or "test").
        num_samples (int): Total number of samples to generate.
        chunk_size (int): Samples per parquet file.
        output_dir (str): Directory that receives the parquet files.

    Returns:
        int: The number of samples generated.
    """
    os.makedirs(output_dir, exist_ok=True)

    dataset = Chladni2DDataset()
    # Ceiling division: number of parquet files needed.
    num_chunks = -(-num_samples // chunk_size)

    print(f"Generating {num_samples} {split_name} samples in {num_chunks} chunks...")

    sample_stream = iter(dataset)
    buffer = None

    for idx in range(num_samples):
        record = next(sample_stream)

        # Lazily initialize the columnar buffer from the first record's keys.
        if buffer is None:
            buffer = {field: [] for field in record}

        for field, value in record.items():
            buffer[field].append(value)

        # Flush on chunk boundary or when the split is complete.
        if (idx + 1) % chunk_size == 0 or idx == num_samples - 1:
            chunk_idx = idx // chunk_size

            # PyArrow ingests plain lists; convert numpy arrays, pass
            # scalars through untouched.
            columns = {}
            for field, values in buffer.items():
                if hasattr(values[0], 'tolist'):
                    columns[field] = [arr.tolist() for arr in values]
                else:
                    columns[field] = values

            table = pa.table(columns)

            filename = f"{split_name}-{chunk_idx:05d}-of-{num_chunks:05d}.parquet"
            filepath = os.path.join(output_dir, filename)
            pq.write_table(table, filepath)

            print(f"Saved chunk {chunk_idx + 1}/{num_chunks}: {filepath}")

            # Start a fresh buffer for the next chunk.
            buffer = {field: [] for field in record}

    print(f"Generated {num_samples} {split_name} samples")
    return num_samples
68
+
69
+
70
if __name__ == "__main__":
    # Fix the global NumPy RNG so regenerated splits are reproducible.
    np.random.seed(42)

    # Generate train split
    generate_dataset_split("train", num_samples=10000, chunk_size=1000)

    # Generate test split
    generate_dataset_split("test", num_samples=2000, chunk_size=1000)
plot_sample.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Plot a single sample from the Chladni 2D dataset.
4
+
5
+ Visualizes the forcing function and displacement response patterns
6
+ for a single Chladni plate sample using imshow plots.
7
+ """
8
+
9
+ import numpy as np
10
+ import matplotlib.pyplot as plt
11
+ from dataset import Chladni2DDataset
12
+
13
+
14
def plot_chladni_sample(sample, save_path="sample_plot.png"):
    """
    Plot a single sample from the Chladni 2D dataset.

    Renders side-by-side imshow panels: the forcing function S(x,y) on the
    left and the displacement response Z(x,y) on the right, each with its
    own colorbar. The figure is saved to `save_path` and also shown.

    Args:
        sample (dict): Dictionary returned by Chladni2DDataset containing
                       forcing patterns, displacement responses, and metadata
        save_path (str): Path to save the plot image (default: "sample_plot.png")

    Example:
        >>> dataset = Chladni2DDataset()
        >>> sample = next(iter(dataset))
        >>> plot_chladni_sample(sample, "my_plot.png")
        Saved plot: my_plot.png
    """
    frequency = sample["frequency"]

    fig, axs = plt.subplots(1, 2, figsize=(15, 6), facecolor="white")

    # Both panels share the same physical extent in meters.
    extent = [0, sample["plate_length_x"], 0, sample["plate_length_y"]]

    # (axis, 2D field, colormap, title, colorbar label) for each panel.
    panels = [
        (
            axs[0],
            sample["forcing"],
            "viridis",
            "Forcing Function S(x,y)",
            "Forcing Amplitude",
        ),
        (
            axs[1],
            sample["displacement"],
            "RdBu_r",
            f"Displacement Response Z(x,y), f = {frequency:.2f} Hz",
            "Displacement Amplitude",
        ),
    ]

    for ax, field, cmap, title, cbar_label in panels:
        ax.set_facecolor("white")
        # Transpose so the first array axis (x) maps to the horizontal axis.
        image = ax.imshow(
            field.T,
            extent=extent,
            origin='lower',
            cmap=cmap,
            aspect='equal'
        )
        ax.set_xlabel("X axis (m)", fontsize=14)
        ax.set_ylabel("Y axis (m)", fontsize=14)
        ax.set_title(title, fontsize=16, fontweight="bold")
        ax.tick_params(labelsize=12)
        ax.grid(True, alpha=0.3)

        colorbar = plt.colorbar(image, ax=ax)
        colorbar.set_label(cbar_label, rotation=270, labelpad=25, fontsize=14)
        colorbar.ax.tick_params(labelsize=12)

    plt.tight_layout()

    # Save the plot
    plt.savefig(save_path, facecolor="white", dpi=150, bbox_inches="tight")
    print(f"Saved plot: {save_path}")

    plt.show()
    plt.close(fig)
96
+
97
+
98
if __name__ == "__main__":
    """
    Generate and visualize a sample from the Chladni 2D dataset.

    This script creates a dataset instance, generates a single sample,
    prints information about the sample structure, and creates a
    visualization showing both the forcing pattern and displacement response.
    """
    # Set random seed for reproducibility
    np.random.seed(42)

    print("Chladni 2D Dataset Visualization")
    print("=" * 35)

    # Create dataset instance
    dataset = Chladni2DDataset(numPoints=50, n_range=10, m_range=10)

    print(f"Dataset configuration:")
    print(f" Grid resolution: {dataset.numPoints}x{dataset.numPoints}")
    print(f" Modal basis: {dataset.n_range}x{dataset.m_range}")
    print(f" Plate dimensions: {dataset.L:.3f}x{dataset.M:.3f} m")
    print(f" Driving frequency: {dataset.omega/(2*np.pi):.1f} Hz")

    # Generate a single sample.
    # NOTE(review): next() is called twice, so the first draw is discarded
    # and the *second* sample is plotted — presumably intentional, confirm.
    dataset_iter = iter(dataset)
    sample = next(dataset_iter)
    sample = next(dataset_iter)

    # Print each field's shape (arrays) or value (scalars/metadata).
    print(f"\nSample structure:")
    print("-" * 20)
    for key, value in sample.items():
        if hasattr(value, 'shape'):
            print(f" {key:20s}: {str(value.shape):15s}")
        else:
            print(f" {key:20s}: {type(value).__name__} = {value}")

    # Plot the sample
    print(f"\nGenerating visualization...")
    plot_chladni_sample(sample)
    print("Visualization complete!")
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ numpy
2
+ torch
3
+ scikit-learn
4
+ matplotlib
5
+ pyarrow
6
+ pillow
7
+ tqdm
sample_plot.png ADDED

Git LFS Details

  • SHA256: c5afb4aab6e4f0383b32ce5470f57efac9837a819ca693276154532677548fd0
  • Pointer size: 131 Bytes
  • Size of remote file: 138 kB