Hang917 committed on
Commit
4c61b7c
·
1 Parent(s): 1bab015

UPDATE: init MPPI file and infra; model loaded successfully

Browse files
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ *.pyc
__init__.py ADDED
File without changes
logs/run-20250920_075505-k227j0rs/checkpoint_step_210000_20250920_144806.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1661992b3b0eada2f7960db6019c0901dd170cf81cb0ed9ac2af6c24af13b176
3
+ size 1948153
logs/run-20250920_075505-k227j0rs/config.yaml ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ _wandb:
2
+ value:
3
+ cli_version: 0.21.0
4
+ e:
5
+ w6ksdvd23g5609btg3agj7flfwr4evlf:
6
+ args:
7
+ - --config
8
+ - ICLR/config/bb6/bb6_v2.yaml
9
+ - --gpu_id
10
+ - "2"
11
+ codePath: main_cheetah.py
12
+ codePathLocal: main_cheetah.py
13
+ cpu_count: 128
14
+ cpu_count_logical: 255
15
+ cudaVersion: "12.6"
16
+ disk:
17
+ /:
18
+ total: "6598647398400"
19
+ used: "2392582606848"
20
+ email: sangliteng@gmail.com
21
+ executable: /home/sangliteng/miniconda3/envs/learning-hybrid-systems/bin/python3
22
+ git:
23
+ commit: e65e9632c7a9d9bc1847e0a5a83e8e29db0ac56e
24
+ remote: git@github.com:SangliTeng/Leaning-Hybrid-Systems.git
25
+ gpu: NVIDIA RTX 6000 Ada Generation
26
+ gpu_count: 8
27
+ gpu_nvidia:
28
+ - architecture: Ada
29
+ cudaCores: 18176
30
+ memoryTotal: "51527024640"
31
+ name: NVIDIA RTX 6000 Ada Generation
32
+ uuid: GPU-45d30378-435b-de16-3aea-9fc48527fe61
33
+ - architecture: Ada
34
+ cudaCores: 18176
35
+ memoryTotal: "51527024640"
36
+ name: NVIDIA RTX 6000 Ada Generation
37
+ uuid: GPU-19a03a90-a9e0-a194-8d43-c2dcb7925140
38
+ - architecture: Ada
39
+ cudaCores: 18176
40
+ memoryTotal: "51527024640"
41
+ name: NVIDIA RTX 6000 Ada Generation
42
+ uuid: GPU-ea5b1c7d-baf5-6bcb-1ce1-0ee9ca4b5c8f
43
+ - architecture: Ada
44
+ cudaCores: 18176
45
+ memoryTotal: "51527024640"
46
+ name: NVIDIA RTX 6000 Ada Generation
47
+ uuid: GPU-b1a2e98c-e563-a0fe-47ce-cfa29028d5c7
48
+ - architecture: Ada
49
+ cudaCores: 18176
50
+ memoryTotal: "51527024640"
51
+ name: NVIDIA RTX 6000 Ada Generation
52
+ uuid: GPU-208eeaba-0174-d4e0-bc7a-2eb5f7983a6e
53
+ - architecture: Ada
54
+ cudaCores: 18176
55
+ memoryTotal: "51527024640"
56
+ name: NVIDIA RTX 6000 Ada Generation
57
+ uuid: GPU-81a0e787-8873-418d-6ff3-e3f59deb75a0
58
+ - architecture: Ada
59
+ cudaCores: 18176
60
+ memoryTotal: "51527024640"
61
+ name: NVIDIA RTX 6000 Ada Generation
62
+ uuid: GPU-8619099e-16b4-d667-b97f-518c3954df8c
63
+ - architecture: Ada
64
+ cudaCores: 18176
65
+ memoryTotal: "51527024640"
66
+ name: NVIDIA RTX 6000 Ada Generation
67
+ uuid: GPU-8042fac2-fd28-c8e9-668b-ceb33605fb49
68
+ host: hr-6000ada
69
+ memory:
70
+ total: "811164614656"
71
+ os: Linux-5.15.0-143-generic-x86_64-with-glibc2.35
72
+ program: /home/sangliteng/Research/Leaning-Hybrid-Systems/main_cheetah.py
73
+ python: CPython 3.12.11
74
+ root: ./ICLR/bb6
75
+ startedAt: "2025-09-20T07:55:05.803083Z"
76
+ writerId: w6ksdvd23g5609btg3agj7flfwr4evlf
77
+ m: []
78
+ python_version: 3.12.11
79
+ t:
80
+ "1":
81
+ - 1
82
+ "2":
83
+ - 1
84
+ "3":
85
+ - 2
86
+ - 13
87
+ - 15
88
+ - 16
89
+ "4": 3.12.11
90
+ "5": 0.21.0
91
+ "12": 0.21.0
92
+ "13": linux-x86_64
93
+ anti_collapse_weight:
94
+ value: 1000
95
+ batch_size:
96
+ value: 4096
97
+ data_path_test:
98
+ value: None
99
+ data_path_train:
100
+ value: /home/sangliteng/Research/DynaTraj/dataset/bb/bb_shift_down_sample.npz
101
+ decoder_batch_size:
102
+ value: 131072
103
+ decoder_finetune_steps:
104
+ value: 200000
105
+ decoder_lr:
106
+ value: 0.01
107
+ default_activation:
108
+ value: ReLU
109
+ dim_linear_in_decoder:
110
+ value:
111
+ - 0
112
+ - 0
113
+ dim_linear_in_encoder:
114
+ value:
115
+ - 0
116
+ - 0
117
+ dim_linear_in_vec_field:
118
+ value:
119
+ - 0
120
+ - 0
121
+ dim_linear_out_decoder:
122
+ value: 0
123
+ dim_linear_out_encoder:
124
+ value: 0
125
+ dim_linear_out_vec_field:
126
+ value: 0
127
+ dynamics_init_scale:
128
+ value: 0.005
129
+ dynamics_loss_type:
130
+ value: l2
131
+ dynamics_weight:
132
+ value: 10
133
+ encoder_lr:
134
+ value: 0.01
135
+ eval_batch_size:
136
+ value: 64
137
+ eval_every:
138
+ value: 2.5e+22
139
+ eval_trajectory_length:
140
+ value: 500
141
+ except_features:
142
+ value: []
143
+ external_input_dim:
144
+ value: 6
145
+ hidden_dim_linear_decoder:
146
+ value: []
147
+ hidden_dim_linear_encoder:
148
+ value: []
149
+ hidden_dim_linear_vec_field:
150
+ value: []
151
+ hidden_dims_dec:
152
+ value:
153
+ - 128
154
+ - 128
155
+ - 128
156
+ - 128
157
+ - 128
158
+ - 128
159
+ - 128
160
+ - 128
161
+ hidden_dims_enc:
162
+ value:
163
+ - 64
164
+ - 64
165
+ - 64
166
+ hidden_dims_vector_field:
167
+ value:
168
+ - 128
169
+ - 128
170
+ input_dim:
171
+ value: 18
172
+ is_lagrangian_system:
173
+ value: true
174
+ isometry_loss_weight:
175
+ value: 1
176
+ latent_dim:
177
+ value: 30
178
+ learning_rate:
179
+ value: 0.0005
180
+ log_interval:
181
+ value: 50
182
+ loss_mode:
183
+ value: z
184
+ max_iso_samples:
185
+ value: 16384
186
+ min_covariance_threshold:
187
+ value: 0.09
188
+ model_type:
189
+ value: hybrid
190
+ normalize_data:
191
+ value: false
192
+ ode_method:
193
+ value: rk4
194
+ project_name:
195
+ value: debug architecture
196
+ reconstruction_loss_type:
197
+ value: l2
198
+ run_name:
199
+ value: bb with paddle
200
+ save_checkpoint_every:
201
+ value: 250
202
+ smooth_budget:
203
+ value: 0.0001
204
+ smooth_weight:
205
+ value: 0
206
+ steps_per_length:
207
+ value: 2000
208
+ switching_threshold_scale:
209
+ value: 1.5
210
+ switching_weight_multiplier:
211
+ value: 2
212
+ test_info:
213
+ value: same profile as bb, much larger network
214
+ time_step:
215
+ value: 0.01
216
+ train_test_ratio:
217
+ value: 0.95
218
+ trajectory_lengths:
219
+ value:
220
+ - 80
221
+ - 150
222
+ - 200
223
+ - 200
224
+ - 200
225
+ use_switching_weights:
226
+ value: true
227
+ use_weight_smoothing:
228
+ value: false
229
+ vector_field_lr:
230
+ value: 0.01
231
+ viz_interval:
232
+ value: 50
233
+ wandb_base_dir:
234
+ value: ./ICLR/bb6
235
+ weight_smoothing_window:
236
+ value: 0
237
+ z_continuity_weight:
238
+ value: 10
models/__init__.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Models Package for Neural Hybrid Systems
3
+
4
+ This package contains all neural network architectures and model utilities.
5
+ """
6
+
7
+ from .factory import create_model
8
+ from .utils import interpolate_trajectory, interpolate_external_input, create_vector_field_with_external_input
9
+ from .architectures import HybridLatentODEModel, VanillaODEModel, EventODEModel
10
+ from .base import BaseModel
11
+ from .components import MLPWithCustomInit
12
+ from .components.initialization import (
13
+ init_dynamics_network, init_autoencoder_network,
14
+ init_event_network, init_reset_network
15
+ )
16
+
17
+ # Backward compatibility imports
18
+ from .factory import create_hybrid_models, create_neural_ode_models, create_event_ode_models
19
+
20
+ __all__ = [
21
+ 'create_model', 'interpolate_trajectory',
22
+ 'interpolate_external_input', 'create_vector_field_with_external_input',
23
+ 'HybridLatentODEModel', 'VanillaODEModel', 'EventODEModel',
24
+ 'BaseModel', 'MLPWithCustomInit',
25
+ 'init_dynamics_network', 'init_autoencoder_network',
26
+ 'init_event_network', 'init_reset_network',
27
+ # Backward compatibility
28
+ 'create_hybrid_models', 'create_neural_ode_models', 'create_event_ode_models',
29
+ ]
models/architectures/__init__.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Model Architectures Package
3
+ """
4
+
5
+ from .hybrid import HybridLatentODEModel
6
+ from .vanilla_ode import VanillaODEModel
7
+ from .event_ode import EventODEModel
8
+ from .augmented_ode import AugmentedODEModel
9
+ from .autoregressive import AutoregressiveModel
10
+
11
+ __all__ = [
12
+ 'HybridLatentODEModel',
13
+ 'VanillaODEModel',
14
+ 'EventODEModel',
15
+ 'AugmentedODEModel',
16
+ 'AutoregressiveModel',
17
+ ]
models/architectures/augmented_ode.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Augmented Neural ODE Model Architecture
3
+
4
+ Augmented ODE extends vanilla ODE by appending encoded features to the state space.
5
+ The vector field operates on [x; aug(x); u] where aug(x) = encoder(x) and u is external input.
6
+ Only the original x dimensions are used for reconstruction loss.
7
+ """
8
+
9
+ import torch
10
+ import torch.optim as optim
11
+ import itertools
12
+ import torchdiffeq
13
+
14
+ from ..base import BaseModel
15
+ from ..components.mlp import MLPWithCustomInit
16
+ from ..components.initialization import init_autoencoder_network, init_dynamics_network
17
+ from ..utils import interpolate_external_input, create_vector_field_with_external_input, interpolate_trajectory
18
+
19
+
20
class AugmentedODEModel(BaseModel):
    """Augmented Neural ODE model.

    The state is augmented with learned features: the vector field operates
    on [x; aug(x); u] where aug(x) = encoder(x) and u is an optional external
    input.  Only the original x dimensions are returned as the predicted
    trajectory (and therefore used for the reconstruction loss).
    """

    def __init__(self, config, device):
        """Build the augmentation encoder / vector-field networks and optimizers.

        Args:
            config: dict-like configuration.  NOTE: mutated in place —
                'aug_dim', 'total_dim' and 'vector_field_input_dim' are
                written back for downstream consumers.
            device: torch device the networks are moved to.
        """
        super().__init__(config, device)
        input_dim = config['input_dim']
        hidden_dims_enc = config.get('hidden_dims_enc', [32, 64])
        hidden_dims_vector_field = config['hidden_dims_vector_field']

        # By default aug_dim = input_dim, i.e. the augmentation dimension
        # equals the original state dimension.  (This default is the key
        # design choice of this model.)
        aug_dim = config.get('aug_dim', input_dim)

        # External input support (0 means the system is autonomous)
        external_input_dim = config.get('external_input_dim', 0)

        # Total dimension for vector field: original x + augmented features + external input.
        # The vector field only predicts derivatives for [x; aug(x)], never for u.
        total_dim = input_dim + aug_dim + external_input_dim
        vector_field_output_dim = input_dim + aug_dim  # Only predict derivatives for state + augmented

        # Get activation functions; per-layer overrides fall back to the default.
        encoder_activations = config.get('encoder_activations', None)
        vector_field_activations = config.get('vector_field_activations', None)
        default_activation = getattr(torch.nn, config.get('default_activation', 'ReLU'))

        # Create encoder for augmentation: x -> aug(x)
        self.encoder = MLPWithCustomInit(
            input_dim, hidden_dims_enc, aug_dim,
            activation=default_activation,
            activation_per_layer=encoder_activations
        ).to(device)

        # Create vector field for augmented space: [x; aug(x); u] -> d/dt[x; aug(x)]
        self.vector_field = MLPWithCustomInit(
            total_dim, hidden_dims_vector_field, vector_field_output_dim,
            activation=default_activation,
            activation_per_layer=vector_field_activations
        ).to(device)

        # Initialize networks (dynamics net uses the configured small init scale)
        init_autoencoder_network(self.encoder)
        init_dynamics_network(self.vector_field, scale=config['dynamics_init_scale'])

        # Create optimizers with different learning rates (each net may have its own lr)
        self.optimizers = {
            'encoder': optim.Adam(self.encoder.parameters(),
                                  lr=config.get('encoder_lr', config['learning_rate'])),
            'vector_field': optim.Adam(self.vector_field.parameters(),
                                       lr=config.get('vector_field_lr', config['learning_rate']))
        }

        # Print architecture information
        self.encoder.print_architecture("Augmentation Encoder")
        self.vector_field.print_architecture("Augmented Vector Field")

        # Store derived dimensions back into the shared config
        config['aug_dim'] = aug_dim
        config['total_dim'] = input_dim + aug_dim  # State space dimension
        config['vector_field_input_dim'] = total_dim  # Including external input

        print(f"Augmented ODE Configuration:")
        print(f" Original dimension: {input_dim}")
        print(f" Augmentation dimension: {aug_dim}")
        print(f" External input dimension: {external_input_dim}")
        print(f" Total state dimension: {input_dim + aug_dim}")
        print(f" Vector field input dimension: {total_dim}")

        total_params = sum(p.numel() for p in itertools.chain(
            self.encoder.parameters(), self.vector_field.parameters()))
        print(f"Total parameters: {total_params}")

    def _augment_state(self, x):
        """
        Augment state x with encoded features.

        Args:
            x: State tensor [B, input_dim] or [T, B, input_dim]

        Returns:
            x_aug: Augmented state [B, total_dim] or [T, B, total_dim]
        """
        original_shape = x.shape

        # Flatten to [N, input_dim] for encoder
        if len(original_shape) == 3:  # [T, B, input_dim]
            T, B, D = original_shape
            x_flat = x.reshape(T * B, D)
        else:  # [B, input_dim]
            x_flat = x

        # Encode to get augmentation features.
        # NOTE(review): the encoder is called with an empty list for the time
        # argument — presumably MLPWithCustomInit ignores it; confirm against
        # its forward() signature.
        aug_features = self.encoder([], x_flat)  # [N, aug_dim]

        # Concatenate original state with augmented features
        x_aug_flat = torch.cat([x_flat, aug_features], dim=1)  # [N, total_dim]

        # Reshape back to original structure
        if len(original_shape) == 3:
            x_aug = x_aug_flat.reshape(T, B, self.config['total_dim'])
        else:
            x_aug = x_aug_flat

        return x_aug

    def _extract_original_state(self, x_aug):
        """
        Extract original state from augmented state.

        Args:
            x_aug: Augmented state [..., total_dim]

        Returns:
            x: Original state [..., input_dim] (the leading slice; the
            augmented features occupy the trailing dimensions)
        """
        return x_aug[..., :self.config['input_dim']]

    def inference(self, xt_batch, ut_batch=None):
        """Roll out the augmented ODE from the first state of each trajectory.

        Args:
            xt_batch: [B, L, D+1] — trajectory with absolute time in the last channel.
            ut_batch: optional [B, L_u, U+1] external input with absolute time
                in the last channel.

        Returns:
            dict with the integrated trajectory and intermediate quantities
            ('x_trajectory' is [T, B, input_dim]).
        """
        # xt_batch: [B, L, D+1]
        state_dim = self.config['input_dim']
        x_batch = xt_batch[:, :, :state_dim]
        t_batch = xt_batch[:, :, state_dim]
        B, L, D = x_batch.shape

        # Get initial state and augment it
        x0 = x_batch[:, 0, :]  # [B, input_dim]
        x0_aug = self._augment_state(x0)  # [B, total_dim]

        # Setup time.  A single shared evaluation grid is built from the
        # longest relative horizon in the batch.
        # NOTE(review): per-trajectory timing is collapsed onto one grid —
        # confirm all trajectories in a batch share the same time stamps.
        t_batch_relative = t_batch - t_batch[:, 0:1]
        max_time = torch.max(t_batch_relative[:, -1])
        t_eval = torch.linspace(0, max_time.item() + 1e-5, steps=L, device=self.device)

        # Handle external input using utility function
        u_interp = None
        if ut_batch is not None and self.has_external_input:
            # u_interp = interpolate_external_input(t_eval, t_batch_relative, ut_batch, self.external_input_dim)
            t_batch_u = ut_batch[:, :, -1]
            # Shift input times onto the same relative clock as the states.
            t_batch_u = t_batch_u - t_batch[:, 0:1]
            # print(t_eval.shape, t_batch_u.shape, ut_batch[:, :, :-1].shape)

            u_interp = interpolate_trajectory(
                t_eval,  # [T]
                t_batch_u,  # [B, L_u] (already relative)
                ut_batch[:, :, :-1].permute(1, 0, 2)  # traj: [L_u, B, U]
            ).permute(1, 0, 2)  # -> [T, B, U]

        # Create enhanced vector field - need special handling for augmented state
        def augmented_vector_field_with_input(t, x_aug):
            if u_interp is None:
                return self.vector_field(t, x_aug)

            # Find time index (zero-order-hold style lookup on the t_eval grid)
            t_idx = torch.searchsorted(t_eval, t, right=False)
            t_idx = torch.clamp(t_idx, 0, len(t_eval) - 1)

            # Get interpolated external input at time t
            u_t = u_interp[t_idx, :, :]  # [B, U]

            # Concatenate augmented state with external input
            x_aug_u = torch.cat([x_aug, u_t], dim=-1)  # [B, total_dim + external_input_dim]

            return self.vector_field(t, x_aug_u)

        vector_field_func = augmented_vector_field_with_input

        # Solve ODE in augmented space
        x_aug_trajectory = torchdiffeq.odeint(
            vector_field_func, x0_aug, t_eval,
            method=self.config.get('ode_method', 'rk4')
        )  # [T, B, total_dim]

        # Extract original state trajectory
        x_trajectory = self._extract_original_state(x_aug_trajectory)  # [T, B, input_dim]

        return {
            'x0': x0,
            't_batch': t_batch,
            't_batch_relative': t_batch_relative,
            't_eval': t_eval,
            'x_trajectory': x_trajectory,
            'x_aug_trajectory': x_aug_trajectory,  # For debugging/analysis
            'u_interp': u_interp,
        }
models/architectures/autoregressive.py ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Autoregressive Model Architecture
3
+
4
+ Autoregressive model for sequential prediction: x_{k+1} = AG(x_k, Δt, u_k)
5
+ Supports MLP, RNN, and LSTM architectures for the AG (AutoreGressive) function.
6
+ """
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.optim as optim
11
+ import itertools
12
+
13
+ from ..base import BaseModel
14
+ from ..components.mlp import MLPWithCustomInit
15
+ from ..components.initialization import init_dynamics_network
16
+ from ..utils import interpolate_trajectory
17
+
18
+
19
class AutoregressiveModel(BaseModel):
    """Autoregressive sequence model: x_{k+1} = x_k + AG(x_k, Δt, u_k).

    AG (the AutoreGressive increment predictor) can be an MLP, RNN, or LSTM,
    selected via config['autoregressive_architecture'].  The rollout is
    closed-loop: only the first state of each trajectory is used and all
    later states are the model's own predictions.
    """

    def __init__(self, config, device):
        """Build the AG network, optimizer, and print a summary.

        Args:
            config: dict-like configuration.  NOTE: mutated in place —
                architecture/dimension keys are written back.
            device: torch device the networks are moved to.
        """
        super().__init__(config, device)
        input_dim = config['input_dim']
        hidden_dims = config.get('hidden_dims_autoregressive', [64, 64])

        # External input support (0 means no control input)
        external_input_dim = config.get('external_input_dim', 0)

        # Architecture type: 'mlp', 'rnn', 'lstm'
        arch_type = config.get('autoregressive_architecture', 'mlp').lower()

        # Get activation functions (only used by the MLP variant)
        default_activation = getattr(nn, config.get('default_activation', 'ReLU'))
        activations = config.get('autoregressive_activations', None)

        # Input to AG function: [x_k, Δt, u_k] -> concatenated input
        ag_input_dim = input_dim + 1 + external_input_dim  # x_k + Δt + u_k
        ag_output_dim = input_dim  # x_{k+1}

        if arch_type == 'mlp':
            self.ag_function = MLPWithCustomInit(
                ag_input_dim, hidden_dims, ag_output_dim,
                activation=default_activation,
                activation_per_layer=activations
            ).to(device)

        elif arch_type == 'rnn':
            # RNN-based AG function; hidden size taken from the first entry
            # of hidden_dims (64 if the list is empty).
            self.hidden_dim = hidden_dims[0] if hidden_dims else 64
            self.num_layers = config.get('rnn_num_layers', 2)
            self.rnn = nn.RNN(
                input_size=ag_input_dim,
                hidden_size=self.hidden_dim,
                num_layers=self.num_layers,
                batch_first=True,
                # nn.RNN warns if dropout is set with a single layer
                dropout=config.get('rnn_dropout', 0.0) if self.num_layers > 1 else 0.0
            ).to(device)
            self.output_layer = nn.Linear(self.hidden_dim, ag_output_dim).to(device)

            # Combine RNN and output layer into a single module for consistency
            self.ag_function = nn.Sequential(
                nn.Identity()  # Placeholder - actual RNN computation done in forward
            )

        elif arch_type == 'lstm':
            # LSTM-based AG function (mirrors the RNN branch above)
            self.hidden_dim = hidden_dims[0] if hidden_dims else 64
            self.num_layers = config.get('lstm_num_layers', 2)
            self.lstm = nn.LSTM(
                input_size=ag_input_dim,
                hidden_size=self.hidden_dim,
                num_layers=self.num_layers,
                batch_first=True,
                dropout=config.get('lstm_dropout', 0.0) if self.num_layers > 1 else 0.0
            ).to(device)
            self.output_layer = nn.Linear(self.hidden_dim, ag_output_dim).to(device)

            # Combine LSTM and output layer into a single module for consistency
            self.ag_function = nn.Sequential(
                nn.Identity()  # Placeholder - actual LSTM computation done in forward
            )

        else:
            raise ValueError(f"Unknown architecture type: {arch_type}. Choose from 'mlp', 'rnn', 'lstm'")

        self.arch_type = arch_type

        # Initialize networks
        if arch_type == 'mlp':
            init_dynamics_network(self.ag_function, scale=config['dynamics_init_scale'])
        else:  # RNN/LSTM
            init_dynamics_network(self.output_layer, scale=config['dynamics_init_scale'])
            # Initialize RNN/LSTM with Xavier initialization
            for name, param in (self.rnn if arch_type == 'rnn' else self.lstm).named_parameters():
                if 'weight' in name:
                    nn.init.xavier_uniform_(param)
                elif 'bias' in name:
                    nn.init.zeros_(param)

        # Create one optimizer over all trainable parameters of this model
        if arch_type == 'mlp':
            params = self.ag_function.parameters()
        else:  # RNN/LSTM
            rnn_module = self.rnn if arch_type == 'rnn' else self.lstm
            params = itertools.chain(rnn_module.parameters(), self.output_layer.parameters())

        self.optimizers = {
            'ag_function': optim.Adam(params, lr=config['learning_rate'])
        }

        # Print architecture information
        self._print_architecture_info(config)

        # Store configuration back for downstream consumers
        config['autoregressive_architecture'] = arch_type
        config['ag_input_dim'] = ag_input_dim
        config['ag_output_dim'] = ag_output_dim

        if external_input_dim > 0:
            print(f"External input dimension: {external_input_dim}")
            config['external_input_dim'] = external_input_dim

        if arch_type != 'mlp':
            config['hidden_dim'] = self.hidden_dim
            config['num_layers'] = self.num_layers

        # COMPLETELY REMOVE NORMALIZATION - set to False by default
        self.normalize_dt = False
        self.residual = True  # residual form enforced

    def _print_architecture_info(self, config):
        """Print architecture information"""
        if self.arch_type == 'mlp':
            self.ag_function.print_architecture("Autoregressive MLP")
        else:
            print(f"\nAutoregressive {self.arch_type.upper()} Architecture:")
            print(f" Architecture type: {self.arch_type.upper()}")
            print(f" Input dimension: {config['input_dim'] + 1 + config.get('external_input_dim', 0)} (x + Δt + u)")
            print(f" Hidden dimension: {self.hidden_dim}")
            print(f" Number of layers: {self.num_layers}")
            print(f" Output dimension: {config['input_dim']}")

            # Count parameters
            if self.arch_type == 'rnn':
                rnn_params = sum(p.numel() for p in self.rnn.parameters())
                output_params = sum(p.numel() for p in self.output_layer.parameters())
            else:  # lstm
                rnn_params = sum(p.numel() for p in self.lstm.parameters())
                output_params = sum(p.numel() for p in self.output_layer.parameters())

            total_params = rnn_params + output_params
            print(f" Total parameters: {total_params}")
            print(f" {self.arch_type.upper()}: {rnn_params}")
            print(f" Output layer: {output_params}")

    def _interpolate_external_input(self, t_relative, ut_batch):
        """Interpolate external input at relative times using utility function.

        Args:
            t_relative: [B, L] relative query times.
            ut_batch: [B, L_u, U+1] external input with time in the last channel.

        Returns:
            [B, L, U] interpolated inputs, or None when there is no input.
        """
        if ut_batch is None or self.external_input_dim == 0:
            return None

        # Use utility function - just need to reshape inputs properly
        u_batch = ut_batch[:, :, :self.external_input_dim]  # [B, L_u, U]
        t_u_batch = ut_batch[:, :, self.external_input_dim]  # [B, L_u]

        # Create source trajectory [L_u, B, U] for interpolate_trajectory
        u_traj = u_batch.transpose(0, 1)  # [L_u, B, U]

        # Use first batch's time as reference (assume all batches have same relative timing)
        u_interp = interpolate_trajectory(t_u_batch[0, :], t_relative, u_traj)

        return u_interp.transpose(0, 1)  # [B, L, U]

    def _forward_step(self, x_k, dt, u_k=None, hidden_state=None):
        """
        Single forward step: x_{k+1} = AG(x_k, Δt, u_k)

        Args:
            x_k: Current state [B, input_dim]
            dt: Time step [B, 1]
            u_k: External input [B, external_input_dim] (optional)
            hidden_state: Hidden state for RNN/LSTM (optional)

        Returns:
            x_k_plus_1: Next state [B, input_dim]
            new_hidden_state: Updated hidden state (None for the MLP variant)
        """
        # Concatenate x_k, dt, and u_k
        inputs = [x_k, dt]
        if u_k is not None:
            inputs.append(u_k)
        ag_input = torch.cat(inputs, dim=1)  # [B, input_dim + 1 + external_input_dim]

        if self.arch_type == 'mlp':
            # MLP forward pass - CRITICAL FIX: Use proper forward call with two arguments
            dx = self.ag_function(None, ag_input)  # Use (t, x) interface where t is ignored
            x_k_plus_1 = x_k + dx  # residual form
            return x_k_plus_1, None

        elif self.arch_type == 'rnn':
            # RNN forward pass (single step as a length-1 sequence)
            ag_input_seq = ag_input.unsqueeze(1)  # [B, 1, input_dim + 1 + external_input_dim]
            rnn_output, new_hidden_state = self.rnn(ag_input_seq, hidden_state)
            dx = self.output_layer(rnn_output.squeeze(1))
            x_k_plus_1 = x_k + dx
            return x_k_plus_1, new_hidden_state

        elif self.arch_type == 'lstm':
            # LSTM forward pass (single step as a length-1 sequence)
            ag_input_seq = ag_input.unsqueeze(1)  # [B, 1, input_dim + 1 + external_input_dim]
            lstm_output, new_hidden_state = self.lstm(ag_input_seq, hidden_state)
            dx = self.output_layer(lstm_output.squeeze(1))
            x_k_plus_1 = x_k + dx
            return x_k_plus_1, new_hidden_state

    def _rollout_trajectory(self, x0, t_relative, u_interp=None):
        """
        Rollout using real (possibly non-uniform) time steps with external input.

        Args:
            x0: initial state [B, input_dim]
            t_relative: [B, L] relative times (first = 0)
            u_interp: [B, L, U] interpolated external inputs (optional)

        Returns:
            x_traj: [L, B, input_dim] closed-loop trajectory.
        """
        B, L = t_relative.shape
        device = x0.device
        x_traj = torch.zeros(L, B, self.config['input_dim'], device=device)
        x_traj[0] = x0

        # prepare dt sequence - NO NORMALIZATION AT ALL
        dt_seq = t_relative[:, 1:] - t_relative[:, :-1]  # [B, L-1]
        # Use raw dt values directly without any normalization
        dt_norm = dt_seq

        # hidden state (zero-initialized for the recurrent variants)
        hidden_state = None
        if self.arch_type == 'rnn':
            hidden_state = torch.zeros(self.num_layers, B, self.hidden_dim, device=device)
        elif self.arch_type == 'lstm':
            h0 = torch.zeros(self.num_layers, B, self.hidden_dim, device=device)
            c0 = torch.zeros(self.num_layers, B, self.hidden_dim, device=device)
            hidden_state = (h0, c0)

        # Closed-loop rollout: feed each prediction back as the next input.
        x_current = x0
        for k in range(L - 1):
            dt_k = dt_norm[:, k].unsqueeze(1)  # [B,1] - raw dt values
            u_k = u_interp[:, k, :] if u_interp is not None else None  # [B, U]
            x_next, hidden_state = self._forward_step(x_current, dt_k, u_k, hidden_state)
            x_traj[k + 1] = x_next
            x_current = x_next
        return x_traj

    def inference(self, xt_batch, ut_batch=None):
        """
        xt_batch: [B, L, D+1] (last dim is absolute time)
        ut_batch: [B, L_u, U+1] (last dim is absolute time, optional)
        Returns discrete trajectory at original time stamps (no interpolation).
        """
        state_dim = self.config['input_dim']
        x_batch = xt_batch[:, :, :state_dim]  # not used directly (closed-loop rollout)
        t_batch = xt_batch[:, :, state_dim]  # absolute times
        B, L, _ = x_batch.shape
        t_rel = t_batch - t_batch[:, 0:1]
        x0 = x_batch[:, 0, :]

        # Handle external input
        u_interp = None
        if ut_batch is not None and self.has_external_input:
            # u_interp = self._interpolate_external_input(t_rel, ut_batch)
            t_batch_u = ut_batch[:, :, -1]
            # Shift input times onto the same relative clock as the states.
            t_batch_u = t_batch_u - t_batch[:, 0:1]
            # print(t_rel.shape, t_batch_u.shape, ut_batch[:, :, :-1].shape)

            u_interp = interpolate_trajectory(
                t_rel,  # [T]
                t_batch_u,  # [B, L_u] (already relative)
                ut_batch[:, :, :-1].permute(1, 0, 2)  # traj: [L_u, B, U]
            ).permute(1, 0, 2)  # -> [T, B, U]

        x_traj = self._rollout_trajectory(x0, t_rel, u_interp)  # [L,B,D]

        return {
            'x0': x0,
            't_batch': t_batch,
            't_batch_relative': t_rel,
            't_eval': t_rel[0],  # keep interface (first trajectory's relative times)
            'x_trajectory': x_traj,
            'u_interp': u_interp,
        }

    def get_layer_info(self):
        """
        Unified architecture_info interface for main_v2.
        """
        info = {
            'model_type': 'autoregressive',
            'architecture_type': self.arch_type,
            'residual': self.residual,
        }
        if self.arch_type == 'mlp':
            mlp_info = self.ag_function.get_layer_info()
            mlp_info['role'] = 'increment_predictor'
            info['ag_function'] = mlp_info
        else:
            info['recurrent'] = {
                'hidden_dim': self.hidden_dim,
                'num_layers': self.num_layers,
                'increment_head_params': sum(p.numel() for p in self.output_layer.parameters()),
                'total_parameters': sum(p.numel() for p in self.parameters())
            }
        return info
models/architectures/event_ode.bak ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Event-based Neural ODE Model Architecture
3
+ """
4
+
5
+ import torch
6
+ import torch.optim as optim
7
+ import itertools
8
+ import torchdiffeq
9
+
10
+ from ..base import BaseModel
11
+ from ..components.mlp import MLPWithCustomInit
12
+ from ..components.initialization import init_dynamics_network, init_event_network, init_reset_network
13
+ from ..utils import interpolate_external_input, create_vector_field_with_external_input, interpolate_trajectory
14
+
15
+
16
+ class EventODEModel(BaseModel):
17
    def __init__(self, config, device):
        """Build the vector-field / event / state-reset networks and optimizers.

        Args:
            config: dict-like configuration (dimensions, learning rates,
                initialization scales).
            device: torch device the networks are moved to.
        """
        super().__init__(config, device)
        input_dim = config['input_dim']
        hidden_dims_vector_field = config['hidden_dims_vector_field']
        hidden_dims_event = config.get('hidden_dims_event', [32, 16])
        hidden_dims_reset = config.get('hidden_dims_reset', [32, 16])

        # External input support: all three networks receive [x; u]
        external_input_dim = config.get('external_input_dim', 0)
        vector_field_input_dim = input_dim + external_input_dim
        event_input_dim = input_dim + external_input_dim
        reset_input_dim = input_dim + external_input_dim

        # Get activation functions
        default_activation = getattr(torch.nn, config.get('default_activation', 'ReLU'))

        # Create vector field, event function, and state reset networks.
        # The event function outputs a scalar (zero-crossing detector);
        # the reset network maps a state back into the state space.
        self.vector_field = MLPWithCustomInit(
            vector_field_input_dim, hidden_dims_vector_field, input_dim,
            activation=default_activation
        ).to(device)

        self.event_function = MLPWithCustomInit(
            event_input_dim, hidden_dims_event, 1,
            activation=default_activation
        ).to(device)

        self.state_reset = MLPWithCustomInit(
            reset_input_dim, hidden_dims_reset, input_dim,
            activation=default_activation
        ).to(device)

        # Initialize networks with specialized functions
        init_dynamics_network(self.vector_field, scale=config['dynamics_init_scale'])
        init_event_network(self.event_function, scale=config.get('event_init_scale', 0.1))
        init_reset_network(self.state_reset, scale=config.get('reset_init_scale', 0.05))

        # Create optimizers with potentially different learning rates
        self.optimizers = {
            'vector_field': optim.Adam(self.vector_field.parameters(),
                                       lr=config.get('vector_field_lr', config['learning_rate'])),
            'event_function': optim.Adam(self.event_function.parameters(),
                                         lr=config.get('event_lr', config['learning_rate'])),
            'state_reset': optim.Adam(self.state_reset.parameters(),
                                      lr=config.get('reset_lr', config['learning_rate']))
        }

        # Print architecture information
        self.vector_field.print_architecture("Vector Field")
        self.event_function.print_architecture("Event Function")
        self.state_reset.print_architecture("State Reset")

        if external_input_dim > 0:
            print(f"External input dimension: {external_input_dim}")
            print(f"Vector field input dimension: {vector_field_input_dim} (state + external)")

        # Print initialization information
        print(f"Initialization scales:")
        print(f" Vector field: {config['dynamics_init_scale']}")
        print(f" Event function: {config.get('event_init_scale', 0.1)} (large to prevent runaway boundaries)")
        print(f" State reset: {config.get('reset_init_scale', 0.05)}")

        total_params = sum(p.numel() for p in itertools.chain(
            self.vector_field.parameters(), self.event_function.parameters(), self.state_reset.parameters()))
        print(f"Total parameters: {total_params}")
82
+
83
+ def _create_vector_field_with_input(self, u_interp, t_eval):
84
+ """Create a vector field function that includes interpolated external input"""
85
+ def vector_field_func(t, x):
86
+ if u_interp is None:
87
+ return self.vector_field(t, x)
88
+
89
+ # Find the time index for interpolation
90
+ t_idx = torch.searchsorted(t_eval, t, right=False)
91
+ t_idx = torch.clamp(t_idx, 0, len(t_eval) - 1)
92
+
93
+ # Get interpolated u at time t
94
+ u_t = u_interp[t_idx, :, :] # [B, U]
95
+
96
+ return self.vector_field(t, x, u_t)
97
+
98
+ return vector_field_func
99
+
100
+ def _event_ode_with_reset(self, x0, t_span, u_interp=None, event_threshold=0.0):
101
+ """
102
+ Solve ODE with event detection and state reset including external input
103
+
104
+ Args:
105
+ x0: Initial condition [B, D]
106
+ t_span: Time span [T]
107
+ u_interp: Interpolated external input [T, B, U] (optional)
108
+ event_threshold: Threshold for event detection (default: 0.0)
109
+
110
+ Returns:
111
+ x_trajectory: [T, B, D] trajectory with resets
112
+ event_times: List of event times
113
+ event_states: List of states at event times
114
+ """
115
+ B, D = x0.shape
116
+ T = len(t_span)
117
+ device = x0.device
118
+
119
+ # Initialize trajectory storage
120
+ x_trajectory = torch.zeros(T, B, D, device=device)
121
+ x_trajectory[0] = x0
122
+
123
+ # Event tracking
124
+ event_times = []
125
+ event_states = []
126
+
127
+ # Current state
128
+ x_current = x0.clone()
129
+
130
+ # Create vector field with external input
131
+ vector_field_func = self._create_vector_field_with_input(u_interp, t_span)
132
+
133
+ # Solve ODE step by step with event detection
134
+ for i in range(1, T):
135
+ dt = t_span[i] - t_span[i-1]
136
+ t_current = t_span[i-1:i+1]
137
+
138
+ # Solve ODE for this time step
139
+ x_step = torchdiffeq.odeint(
140
+ vector_field_func, x_current, t_current,
141
+ method=self.config.get('ode_method', 'rk4')
142
+ ) # [2, B, D]
143
+
144
+ x_next = x_step[-1] # [B, D]
145
+
146
+ # Check for events
147
+ with torch.no_grad():
148
+ # Get external input for event function if available
149
+ if u_interp is not None:
150
+ u_current = u_interp[i, :, :] # [B, U]
151
+ event_input = torch.cat([x_next, u_current], dim=-1)
152
+ event_values = self.event_function([], event_input).squeeze(-1) # [B]
153
+ else:
154
+ event_values = self.event_function([], x_next).squeeze(-1) # [B]
155
+
156
+ event_mask = (event_values > event_threshold) # [B]
157
+
158
+ if event_mask.any():
159
+ # Apply state reset for trajectories that triggered events
160
+ if u_interp is not None:
161
+ reset_input = torch.cat([x_next, u_current], dim=-1)
162
+ x_reset = self.state_reset([], reset_input) # [B, D]
163
+ else:
164
+ x_reset = self.state_reset([], x_next) # [B, D]
165
+
166
+ x_next = torch.where(event_mask.unsqueeze(-1), x_reset, x_next)
167
+
168
+ # Record event information
169
+ event_times.append(t_span[i].item())
170
+ event_states.append(x_next[event_mask].clone())
171
+
172
+ x_trajectory[i] = x_next
173
+ x_current = x_next
174
+
175
+ return x_trajectory, event_times, event_states
176
+
177
+ def inference(self, xt_batch, ut_batch=None):
178
+ # xt_batch: [B, L, D+1]
179
+ state_dim = self.config['input_dim']
180
+ x_batch = xt_batch[:, :, :state_dim]
181
+ t_batch = xt_batch[:, :, state_dim]
182
+ B, L, D = x_batch.shape
183
+
184
+ x0 = x_batch[:, 0, :]
185
+ t_batch_relative = t_batch - t_batch[:, 0:1]
186
+ max_time = torch.max(t_batch_relative[:, -1])
187
+
188
+ # Create time evaluation grid
189
+ time_step = self.config.get('time_step', 0.1)
190
+ n_steps = max(L, int(torch.ceil(max_time / time_step).item()) + 1)
191
+ t_eval = torch.linspace(0, max_time.item() + 1e-5, steps=n_steps, device=self.device)
192
+
193
+ # Handle external input using utility function
194
+ u_interp = None
195
+ if ut_batch is not None and self.has_external_input:
196
+ # u_interp = interpolate_external_input(t_eval, t_batch_relative, ut_batch, self.external_input_dim)
197
+ t_batch_u = ut_batch[:, :, -1]
198
+ t_batch_u = t_batch_u - t_batch[:, 0:1]
199
+ # print(t_eval.shape, t_batch_u.shape, ut_batch[:, :, :-1].shape)
200
+
201
+ u_interp = interpolate_trajectory(
202
+ t_eval, # [T]
203
+ t_batch_u, # [B, L_u] (already relative)
204
+ ut_batch[:, :, :-1].permute(1, 0, 2) # traj: [L_u, B, U]
205
+ ).permute(1, 0, 2) # -> [T, B, U]
206
+
207
+ # Create enhanced vector field
208
+ vector_field_func = create_vector_field_with_external_input(self.vector_field, u_interp, t_eval)
209
+
210
+ # Solve Event ODE with event detection
211
+ event_threshold = self.config.get('event_threshold', 0.0)
212
+ x_trajectory, event_times, event_states = self._event_ode_with_reset(
213
+ x0, t_eval, u_interp, event_threshold
214
+ )
215
+
216
+ return {
217
+ 'x0': x0,
218
+ 't_batch': t_batch,
219
+ 't_batch_relative': t_batch_relative,
220
+ 't_eval': t_eval,
221
+ 'x_trajectory': x_trajectory,
222
+ 'event_times': event_times,
223
+ 'event_states': event_states,
224
+ 'num_events': len(event_times),
225
+ 'u_interp': u_interp,
226
+ }
models/architectures/event_ode.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Event-based Neural ODE Model Architecture
3
+ """
4
+
5
+ import torch
6
+ import torch.optim as optim
7
+ import itertools
8
+ import torchdiffeq
9
+ import torch.nn as nn
10
+
11
+ from ..base import BaseModel
12
+ from ..components.mlp import MLPWithCustomInit
13
+ from ..components.initialization import init_dynamics_network, init_event_network, init_reset_network
14
+ from ..utils import interpolate_external_input, create_vector_field_with_external_input, interpolate_trajectory
15
+
16
+
17
class EventODEModel(BaseModel):
    """Event-driven Neural ODE (odeint_event variant).

    Components:
      * ``vector_field``   — continuous dynamics dx/dt = f(x[, u])
      * ``event_function`` — scalar guard g(x[, u]); a threshold crossing
        triggers a discrete transition
      * ``state_reset``    — reset map applied to the state at each event
    """

    def __init__(self, config, device):
        """Build the three networks, initialize them, and create per-network
        Adam optimizers with overridable learning rates.

        Args:
            config: hyper-parameter dict (keys read inline below).
            device: torch device the networks are moved to.
        """
        super().__init__(config, device)
        input_dim = config['input_dim']
        hidden_dims_vector_field = config['hidden_dims_vector_field']
        hidden_dims_event = config.get('hidden_dims_event', [32, 16])
        hidden_dims_reset = config.get('hidden_dims_reset', [32, 16])

        # Optional external (control) input is concatenated to the state for
        # all three networks.
        external_input_dim = config.get('external_input_dim', 0)
        vector_field_input_dim = input_dim + external_input_dim
        event_input_dim = input_dim + external_input_dim
        reset_input_dim = input_dim + external_input_dim

        default_activation = getattr(torch.nn, config.get('default_activation', 'ReLU'))

        self.vector_field = MLPWithCustomInit(
            vector_field_input_dim, hidden_dims_vector_field, input_dim,
            activation=default_activation
        ).to(device)

        self.event_function = MLPWithCustomInit(
            event_input_dim, hidden_dims_event, 1,
            activation=default_activation
        ).to(device)

        self.state_reset = MLPWithCustomInit(
            reset_input_dim, hidden_dims_reset, input_dim,
            activation=default_activation
        ).to(device)

        # FIX: resolve the initialization scales once so the log below reports
        # the values actually used. Previously the defaults were 0.1*100 and
        # 0.05*100 (i.e. 10.0 / 5.0) but the printout claimed 0.1 / 0.05.
        event_init_scale = config.get('event_init_scale', 10.0)   # was 0.1 * 100
        reset_init_scale = config.get('reset_init_scale', 5.0)    # was 0.05 * 100

        init_dynamics_network(self.vector_field, scale=config['dynamics_init_scale'])
        init_event_network(self.event_function, scale=event_init_scale)
        init_reset_network(self.state_reset, scale=reset_init_scale)

        # Separate optimizers so each component can train at its own rate.
        self.optimizers = {
            'vector_field': optim.Adam(self.vector_field.parameters(),
                                       lr=config.get('vector_field_lr', config['learning_rate'])),
            'event_function': optim.Adam(self.event_function.parameters(),
                                         lr=config.get('event_lr', config['learning_rate'])),
            'state_reset': optim.Adam(self.state_reset.parameters(),
                                      lr=config.get('reset_lr', config['learning_rate']))
        }

        self.vector_field.print_architecture("Vector Field")
        self.event_function.print_architecture("Event Function")
        self.state_reset.print_architecture("State Reset")

        if external_input_dim > 0:
            print(f"External input dimension: {external_input_dim}")
            print(f"Vector field input dimension: {vector_field_input_dim} (state + external)")

        print(f"Initialization scales:")
        print(f"  Vector field: {config['dynamics_init_scale']}")
        print(f"  Event function: {event_init_scale} (large to prevent runaway boundaries)")
        print(f"  State reset: {reset_init_scale}")

        total_params = sum(p.numel() for p in itertools.chain(
            self.vector_field.parameters(), self.event_function.parameters(), self.state_reset.parameters()))
        print(f"Total parameters: {total_params}")
83
+
84
+ def _event_ode_with_reset(self, x0, t_span, u_interp=None, event_threshold=0.0):
85
+ """
86
+ Solve ODE with event detection and state reset using odeint_event for event times,
87
+ then densely integrate along the provided t_span grid between events until t_span[-1].
88
+ Returns x_trajectory aligned to t_span, and lists of event times/states.
89
+ """
90
+
91
+ # #####
92
+ # print(f"Terminaltion time = {t_span[-1]}")
93
+ # ####
94
+ B, D = x0.shape
95
+ T = len(t_span)
96
+ device = x0.device
97
+
98
+ # Vector field with external input (signature: f(t, state) -> dx/dt)
99
+ vector_field_func = create_vector_field_with_external_input(self.vector_field, u_interp, t_span)
100
+
101
+ # Event function for odeint_event: batch-min with threshold (zero crossing)
102
+ def event_fn(t, state):
103
+ # print(f"Event function called at t={t}")
104
+ if u_interp is not None:
105
+ idx = torch.searchsorted(t_span, t, right=False)
106
+ idx = torch.clamp(idx, 0, T - 1)
107
+ u_t = u_interp[idx, :, :]
108
+ vals = self.event_function([], torch.cat([state, u_t], dim=-1)).squeeze(-1) # [B]
109
+ else:
110
+ vals = self.event_function([], state).squeeze(-1) # [B]
111
+
112
+ vals = (vals - event_threshold).min()
113
+ # print(f"Event function values at t={t}: {vals.item():6f}")
114
+ return vals
115
+
116
+ # Storage (raw dense samples); start with initial point
117
+ time_samples = [t_span[0].reshape(-1)] # list of [k]
118
+ state_samples = [x0.unsqueeze(0)] # list of [k,B,D]
119
+ event_times = []
120
+ event_states = []
121
+
122
+ # Integration settings
123
+ atol = self.config.get('ode_atol', 1e-6)
124
+ rtol = self.config.get('ode_rtol', 1e-6)
125
+ method = self.config.get('ode_method', 'rk4')
126
+
127
+ # Current state/time
128
+ current_time = t_span[0]
129
+ current_state = x0
130
+
131
+ # Main loop until end of t_span
132
+ safety_steps = 0
133
+ while float(current_time) < float(t_span[-1]) - 1e-12:
134
+ safety_steps += 1
135
+ if safety_steps > 100000:
136
+ print("[WARN] Max safety steps reached in _event_ode_with_reset; breaking.")
137
+ break
138
+
139
+ # 1) Detect next event time from current_time
140
+ try:
141
+ options = {'max_num_steps': 100}
142
+ event_t, _ = torchdiffeq.odeint_event(
143
+ vector_field_func,
144
+ current_state,
145
+ current_time,
146
+ # t_span,
147
+ event_fn=event_fn,
148
+ reverse_time=False,
149
+ atol=atol,
150
+ rtol=rtol,
151
+ odeint_interface=torchdiffeq.odeint_adjoint,
152
+ options = options,
153
+ # t_span = t_span,
154
+ )
155
+ print(f"Detected event at t={event_t}, safety_steps={safety_steps}")
156
+ except Exception:
157
+ # Fallback: no event detectable; go to the end
158
+ event_t = t_span[-1]
159
+ # print(f"No event at triggered! Fuck!")
160
+
161
+ # Clamp event time to final span
162
+ # print(f"t_span[-1] = {t_span[-1].item():6f}")
163
+ if float(event_t) > float(t_span[-1]):
164
+ event_t = t_span[-1]
165
+ # print("No fucking event")
166
+
167
+ # 2) Dense integration on t_span between (current_time, event_t]
168
+ mask = (t_span > current_time) & (t_span <= event_t)
169
+ if mask.any():
170
+ tt = torch.cat([current_time.reshape(-1), t_span[mask]], dim=0)
171
+ x_seg = torchdiffeq.odeint_adjoint(
172
+ vector_field_func, current_state, tt, method=method, atol=atol, rtol=rtol
173
+ ) # [K, B, D]
174
+ # Append excluding first point to avoid duplication
175
+ time_samples.append(tt[1:])
176
+ state_samples.append(x_seg[1:])
177
+ current_state = x_seg[-1]
178
+ else:
179
+ # No grid points in (current_time, event_t]; still advance to event_t for reset
180
+ tt = torch.stack([current_time, event_t])
181
+ x_seg = torchdiffeq.odeint_adjoint(
182
+ vector_field_func, current_state, tt, method=method, atol=atol, rtol=rtol
183
+ )
184
+ # Append event point for interpolation coverage
185
+ time_samples.append(event_t.reshape(-1))
186
+ state_samples.append(x_seg[-1:].reshape(1, B, D))
187
+ current_state = x_seg[-1]
188
+
189
+ # 3) If reached end, stop
190
+ if float(event_t) >= float(t_span[-1]) - 1e-12:
191
+ current_time = event_t
192
+ break
193
+
194
+ # 4) Record event and apply reset (use interpolate_trajectory for u_t)
195
+ event_times.append(event_t)
196
+ event_states.append(current_state.clone())
197
+ if u_interp is not None:
198
+ t_query_ev = torch.full((B, 1), float(event_t), device=device, dtype=t_span.dtype)
199
+ u_ev = interpolate_trajectory(t_span, t_query_ev, u_interp) # [B,1,U]
200
+ reset_inp = torch.cat([current_state, u_ev[:, 0, :]], dim=-1)
201
+ current_state = self.state_reset([], reset_inp)
202
+ else:
203
+ current_state = self.state_reset([], current_state)
204
+ current_time = event_t
205
+
206
+ # Re-interpolate trajectory back to t_span
207
+ t_dense = torch.cat(time_samples, dim=0) # [K]
208
+ x_dense = torch.cat(state_samples, dim=0) # [K,B,D]
209
+ t_query_full = t_span.unsqueeze(0).expand(B, -1) # [B,T]
210
+ x_interp = interpolate_trajectory(t_dense, t_query_full, x_dense) # [B,T,D]
211
+ x_out = x_interp.permute(1, 0, 2) # [T,B,D]
212
+
213
+ return x_out, event_times, event_states
214
+
215
+ def inference(self, xt_batch, ut_batch=None):
216
+ # xt_batch: [B, L, D+1]
217
+ state_dim = self.config['input_dim']
218
+ x_batch = xt_batch[:, :, :state_dim]
219
+ t_batch = xt_batch[:, :, state_dim]
220
+ B, L, D = x_batch.shape
221
+
222
+ x0 = x_batch[:, 0, :]
223
+ t_batch_relative = t_batch - t_batch[:, 0:1]
224
+ max_time = torch.max(t_batch_relative[:, -1])
225
+
226
+ # Create time evaluation grid
227
+ time_step = self.config.get('time_step', 0.1)
228
+ n_steps = max(L, int(torch.ceil(max_time / time_step).item()) + 1)
229
+ t_eval = torch.linspace(0, max_time.item() + 1e-5, steps=n_steps, device=self.device)
230
+
231
+ # Handle external input using utility function
232
+ u_interp = None
233
+ if ut_batch is not None and self.has_external_input:
234
+ # u_interp = interpolate_external_input(t_eval, t_batch_relative, ut_batch, self.external_input_dim)
235
+ t_batch_u = ut_batch[:, :, -1]
236
+ t_batch_u = t_batch_u - t_batch[:, 0:1]
237
+ # print(t_eval.shape, t_batch_u.shape, ut_batch[:, :, :-1].shape)
238
+
239
+ u_interp = interpolate_trajectory(
240
+ t_eval, # [T]
241
+ t_batch_u, # [B, L_u] (already relative)
242
+ ut_batch[:, :, :-1].permute(1, 0, 2) # traj: [L_u, B, U]
243
+ ).permute(1, 0, 2) # -> [T, B, U]
244
+
245
+ # Create enhanced vector field
246
+ # vector_field_func = create_vector_field_with_external_input(self.vector_field, u_interp, t_eval)
247
+
248
+ # Solve Event ODE with event detection
249
+ event_threshold = self.config.get('event_threshold', 0.0)
250
+ x_trajectory, event_times, event_states = self._event_ode_with_reset(
251
+ x0, t_eval, u_interp, event_threshold
252
+ )
253
+
254
+ # print(f"Total events detected: {len(event_times)}")
255
+
256
+ return {
257
+ 'x0': x0,
258
+ 't_batch': t_batch,
259
+ 't_batch_relative': t_batch_relative,
260
+ 't_eval': t_eval,
261
+ 'x_trajectory': x_trajectory,
262
+ 'event_times': event_times,
263
+ 'event_states': event_states,
264
+ 'num_events': len(event_times),
265
+ 'u_interp': u_interp,
266
+ }
models/architectures/hybrid.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Hybrid Latent ODE Model Architecture
3
+ """
4
+
5
+ import torch
6
+ import torch.optim as optim
7
+ import itertools
8
+ import torchdiffeq
9
+
10
+ from ..base import BaseModel
11
+ from ..components.mlp import MLPWithCustomInit
12
+ from ..components.initialization import init_autoencoder_network, init_dynamics_network
13
+ from ..utils import interpolate_external_input, create_vector_field_with_external_input, interpolate_trajectory
14
+
15
+ from torchdiffeq import odeint_adjoint as odeint
16
+
17
class HybridLatentODEModel(BaseModel):
    """Latent Neural ODE: encoder -> latent ODE dynamics -> decoder,
    with optional external input concatenated to the latent state."""

    def __init__(self, config, device):
        """Construct encoder / vector-field / decoder MLPs, initialize them,
        and create one Adam optimizer per component.

        Args:
            config: hyper-parameter dict (keys read inline below); layer-info
                entries are written back into it for logging.
            device: torch device the networks are moved to.
        """
        super().__init__(config, device)
        input_dim = config['input_dim']
        hidden_dims_enc = config.get('hidden_dims_enc', [])
        # Decoder defaults to the mirrored encoder architecture.
        hidden_dims_dec = config.get('hidden_dims_dec', hidden_dims_enc[::-1])
        hidden_dims_vector_field = config['hidden_dims_vector_field']
        latent_dim = config['latent_dim']

        # External input support.
        external_input_dim = config.get('external_input_dim', 0)
        vector_field_input_dim = latent_dim + external_input_dim
        self.has_external_input = external_input_dim > 0

        # Per-component activation overrides (fall back to the default).
        encoder_activations = config.get('encoder_activations', None)
        decoder_activations = config.get('decoder_activations', None)
        vector_field_activations = config.get('vector_field_activations', None)
        default_activation = getattr(torch.nn, config.get('default_activation', 'ReLU'))

        # NOTE(review): the dim_linear_* keys configure a linear sub-branch;
        # presumably tied to the DOF of the latent manifold / position
        # components — confirm against MLPWithCustomInit.
        self.encoder = MLPWithCustomInit(
            input_dim, hidden_dims_enc, latent_dim,
            activation=default_activation,
            activation_per_layer=encoder_activations,
            dim_in_linear=config.get('dim_linear_in_encoder', [0, 0]),
            dim_out_linear=config.get('dim_linear_out_encoder', 0),
            hidden_dim_linear=config.get('hidden_dim_linear_encoder', [])
        ).to(device)

        # Vector field with linear block: d[q; v]/dt = [A q + B v; MLP(q, v, u)]
        self.vector_field = MLPWithCustomInit(
            vector_field_input_dim, hidden_dims_vector_field, latent_dim,
            activation=default_activation,
            activation_per_layer=vector_field_activations,
            dim_in_linear=config.get('dim_linear_in_vec_field', [0, 0]),
            dim_out_linear=config.get('dim_linear_out_vec_field', 0),
            hidden_dim_linear=config.get('hidden_dim_linear_vec_field', [])
        ).to(device)

        self.decoder = MLPWithCustomInit(
            latent_dim, hidden_dims_dec, input_dim,
            activation=default_activation,
            activation_per_layer=decoder_activations,
            dim_in_linear=config.get('dim_linear_in_decoder', [0, 0]),
            dim_out_linear=config.get('dim_linear_out_decoder', 0),
            hidden_dim_linear=config.get('hidden_dim_linear_decoder', [])
        ).to(device)

        # Role-specific weight initialization.
        init_autoencoder_network(self.encoder)
        init_dynamics_network(self.vector_field, scale=config['dynamics_init_scale'])
        init_autoencoder_network(self.decoder)

        # Per-component optimizers with overridable learning rates.
        self.optimizers = {
            'encoder': optim.Adam(self.encoder.parameters(), lr=config.get('encoder_lr', config['learning_rate'])),
            'vector_field': optim.Adam(self.vector_field.parameters(), lr=config.get('vector_field_lr', config['learning_rate'])),
            'decoder': optim.Adam(self.decoder.parameters(), lr=config.get('decoder_lr', config['learning_rate']))
        }

        self.encoder.print_architecture("Encoder")
        self.vector_field.print_architecture("Vector Field")
        self.decoder.print_architecture("Decoder")

        if external_input_dim > 0:
            print(f"External input dimension: {external_input_dim}")
            print(f"Vector field input dimension: {vector_field_input_dim} (latent {latent_dim}"
                  f"{' + external ' + str(external_input_dim) if external_input_dim>0 else ''})")

        # Store layer information in config for logging.
        config['encoder_layer_info'] = self.encoder.get_layer_info()
        config['vector_field_layer_info'] = self.vector_field.get_layer_info()
        config['decoder_layer_info'] = self.decoder.get_layer_info()

        total_params = sum(p.numel() for p in itertools.chain(
            self.encoder.parameters(), self.vector_field.parameters(), self.decoder.parameters()))
        print(f"Total parameters: {total_params}")
99
+
100
+ def inference(self, xt_batch, ut_batch=None, keep_Jx=False, infer_x=False):
101
+ # xt_batch: [B, L, D+1]
102
+ state_dim = self.config['input_dim']
103
+ latent_dim = self.config['latent_dim']
104
+ x_batch = xt_batch[:, :, :state_dim]
105
+ t_batch = xt_batch[:, :, state_dim]
106
+ B, L, D = x_batch.shape
107
+
108
+ x_flat = x_batch.reshape(-1, D)
109
+ # z_encoded, J_encoded = self.encoder([], x_flat) # OLD
110
+ if keep_Jx:
111
+ # z_encoded, J_encoded = self.encoder.forward_Jac(None, x_flat) # NEW: get Jacobian
112
+ z_encoded = self.encoder.forward(None, x_flat)
113
+ num_smple = self.config.get('max_iso_samples', x_flat.shape[0])
114
+ # Sample points from x_flat
115
+ if num_smple < x_flat.shape[0]:
116
+ indices = torch.randperm(x_flat.shape[0])[:num_smple]
117
+ x_sampled = x_flat[indices]
118
+ else:
119
+ x_sampled = x_flat
120
+ _, J_encoded = self.encoder.forward_Jac(None, x_sampled) # NEW: get Jacobian
121
+ else:
122
+ z_encoded = self.encoder.forward(None, x_flat)
123
+ J_encoded = None
124
+
125
+ z_encoded_traj = z_encoded.reshape(B, L, latent_dim)
126
+ z0 = z_encoded_traj[:, 0, :]
127
+ t_batch_relative = t_batch - t_batch[:, 0:1]
128
+ max_time = torch.max(t_batch_relative[:, -1])
129
+ t_eval = torch.linspace(0, max_time.item() + 1e-5, steps=L, device=self.device)
130
+
131
+ # Handle external input using utility function
132
+ u_interp = None
133
+ if ut_batch is not None and self.has_external_input: # uses flag defined above
134
+ t_batch_u = ut_batch[:, :, -1]
135
+ t_batch_u = t_batch_u - t_batch[:, 0:1]
136
+ # print(t_eval.shape, t_batch_u.shape, ut_batch[:, :, :-1].shape)
137
+
138
+ u_interp = interpolate_trajectory(
139
+ t_eval,
140
+ t_batch_u,
141
+ ut_batch[:, :, :-1].permute(1, 0, 2)
142
+ ).permute(1, 0, 2)
143
+
144
+ # Create enhanced vector field using utility function
145
+ vector_field_func = create_vector_field_with_external_input(self.vector_field, u_interp, t_eval)
146
+
147
+ z_trajectory = torchdiffeq.odeint_adjoint(vector_field_func, z0, t_eval, method=self.config['ode_method'])
148
+
149
+ # decode all z
150
+ if infer_x:
151
+ x_trajectory = self.decoder([], z_trajectory.reshape(-1, latent_dim)).reshape(L, B, D)
152
+ else:
153
+ x_trajectory = None
154
+
155
+ return {
156
+ 'x0': x_batch[:, 0, :],
157
+ 't_batch': t_batch,
158
+ 't_batch_relative': t_batch_relative,
159
+ 't_eval': t_eval,
160
+ 'z_trajectory': z_trajectory,
161
+ 'x_trajectory': x_trajectory,
162
+ 'z_encoded_traj': z_encoded_traj,
163
+ 'u_interp': u_interp,
164
+ 'J_encoded': J_encoded ## for the isometry loss.
165
+ }
models/architectures/latent_rnn.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Hybrid Latent ODE Model Architecture
3
+ """
4
+
5
+ import torch
6
+ import torch.optim as optim
7
+ import itertools
8
+ import torchdiffeq
9
+
10
+ from ..base import BaseModel
11
+ from ..components.mlp import MLPWithCustomInit
12
+ from ..components.initialization import init_autoencoder_network, init_dynamics_network
13
+ from ..utils import interpolate_external_input, create_vector_field_with_external_input, interpolate_trajectory
14
+
15
+ from torchdiffeq import odeint_adjoint as odeint
16
+
17
class RNNLatentODE(BaseModel):
    """Latent Neural ODE variant (encoder -> latent ODE -> decoder).

    NOTE(review): currently an exact duplicate of HybridLatentODEModel in
    models/architectures/hybrid.py — presumably a starting point for an
    RNN-based encoder; consider deduplicating once it diverges.
    """

    def __init__(self, config, device):
        """Construct encoder / vector-field / decoder MLPs, initialize them,
        and create one Adam optimizer per component.

        Args:
            config: hyper-parameter dict (keys read inline below); layer-info
                entries are written back into it for logging.
            device: torch device the networks are moved to.
        """
        super().__init__(config, device)
        input_dim = config['input_dim']
        hidden_dims_enc = config.get('hidden_dims_enc', [])
        # Decoder defaults to the mirrored encoder architecture.
        hidden_dims_dec = config.get('hidden_dims_dec', hidden_dims_enc[::-1])
        hidden_dims_vector_field = config['hidden_dims_vector_field']
        latent_dim = config['latent_dim']

        # External input support.
        external_input_dim = config.get('external_input_dim', 0)
        vector_field_input_dim = latent_dim + external_input_dim
        self.has_external_input = external_input_dim > 0

        # Per-component activation overrides (fall back to the default).
        encoder_activations = config.get('encoder_activations', None)
        decoder_activations = config.get('decoder_activations', None)
        vector_field_activations = config.get('vector_field_activations', None)
        default_activation = getattr(torch.nn, config.get('default_activation', 'ReLU'))

        # NOTE(review): the dim_linear_* keys configure a linear sub-branch;
        # presumably tied to the DOF of the latent manifold / position
        # components — confirm against MLPWithCustomInit.
        self.encoder = MLPWithCustomInit(
            input_dim, hidden_dims_enc, latent_dim,
            activation=default_activation,
            activation_per_layer=encoder_activations,
            dim_in_linear=config.get('dim_linear_in_encoder', [0, 0]),
            dim_out_linear=config.get('dim_linear_out_encoder', 0),
            hidden_dim_linear=config.get('hidden_dim_linear_encoder', [])
        ).to(device)

        # Vector field with linear block: d[q; v]/dt = [A q + B v; MLP(q, v, u)]
        self.vector_field = MLPWithCustomInit(
            vector_field_input_dim, hidden_dims_vector_field, latent_dim,
            activation=default_activation,
            activation_per_layer=vector_field_activations,
            dim_in_linear=config.get('dim_linear_in_vec_field', [0, 0]),
            dim_out_linear=config.get('dim_linear_out_vec_field', 0),
            hidden_dim_linear=config.get('hidden_dim_linear_vec_field', [])
        ).to(device)

        self.decoder = MLPWithCustomInit(
            latent_dim, hidden_dims_dec, input_dim,
            activation=default_activation,
            activation_per_layer=decoder_activations,
            dim_in_linear=config.get('dim_linear_in_decoder', [0, 0]),
            dim_out_linear=config.get('dim_linear_out_decoder', 0),
            hidden_dim_linear=config.get('hidden_dim_linear_decoder', [])
        ).to(device)

        # Role-specific weight initialization.
        init_autoencoder_network(self.encoder)
        init_dynamics_network(self.vector_field, scale=config['dynamics_init_scale'])
        init_autoencoder_network(self.decoder)

        # Per-component optimizers with overridable learning rates.
        self.optimizers = {
            'encoder': optim.Adam(self.encoder.parameters(), lr=config.get('encoder_lr', config['learning_rate'])),
            'vector_field': optim.Adam(self.vector_field.parameters(), lr=config.get('vector_field_lr', config['learning_rate'])),
            'decoder': optim.Adam(self.decoder.parameters(), lr=config.get('decoder_lr', config['learning_rate']))
        }

        self.encoder.print_architecture("Encoder")
        self.vector_field.print_architecture("Vector Field")
        self.decoder.print_architecture("Decoder")

        if external_input_dim > 0:
            print(f"External input dimension: {external_input_dim}")
            print(f"Vector field input dimension: {vector_field_input_dim} (latent {latent_dim}"
                  f"{' + external ' + str(external_input_dim) if external_input_dim>0 else ''})")

        # Store layer information in config for logging.
        config['encoder_layer_info'] = self.encoder.get_layer_info()
        config['vector_field_layer_info'] = self.vector_field.get_layer_info()
        config['decoder_layer_info'] = self.decoder.get_layer_info()

        total_params = sum(p.numel() for p in itertools.chain(
            self.encoder.parameters(), self.vector_field.parameters(), self.decoder.parameters()))
        print(f"Total parameters: {total_params}")
99
+
100
+ def inference(self, xt_batch, ut_batch=None, keep_Jx=False, infer_x=False):
101
+ # xt_batch: [B, L, D+1]
102
+ state_dim = self.config['input_dim']
103
+ latent_dim = self.config['latent_dim']
104
+ x_batch = xt_batch[:, :, :state_dim]
105
+ t_batch = xt_batch[:, :, state_dim]
106
+ B, L, D = x_batch.shape
107
+
108
+ x_flat = x_batch.reshape(-1, D)
109
+ # z_encoded, J_encoded = self.encoder([], x_flat) # OLD
110
+ if keep_Jx:
111
+ # z_encoded, J_encoded = self.encoder.forward_Jac(None, x_flat) # NEW: get Jacobian
112
+ z_encoded = self.encoder.forward(None, x_flat)
113
+ num_smple = self.config.get('max_iso_samples', x_flat.shape[0])
114
+ # Sample points from x_flat
115
+ if num_smple < x_flat.shape[0]:
116
+ indices = torch.randperm(x_flat.shape[0])[:num_smple]
117
+ x_sampled = x_flat[indices]
118
+ else:
119
+ x_sampled = x_flat
120
+ _, J_encoded = self.encoder.forward_Jac(None, x_sampled) # NEW: get Jacobian
121
+ else:
122
+ z_encoded = self.encoder.forward(None, x_flat)
123
+ J_encoded = None
124
+
125
+ z_encoded_traj = z_encoded.reshape(B, L, latent_dim)
126
+ z0 = z_encoded_traj[:, 0, :]
127
+ t_batch_relative = t_batch - t_batch[:, 0:1]
128
+ max_time = torch.max(t_batch_relative[:, -1])
129
+ t_eval = torch.linspace(0, max_time.item() + 1e-5, steps=L, device=self.device)
130
+
131
+ # Handle external input using utility function
132
+ u_interp = None
133
+ if ut_batch is not None and self.has_external_input: # uses flag defined above
134
+ t_batch_u = ut_batch[:, :, -1]
135
+ t_batch_u = t_batch_u - t_batch[:, 0:1]
136
+ # print(t_eval.shape, t_batch_u.shape, ut_batch[:, :, :-1].shape)
137
+
138
+ u_interp = interpolate_trajectory(
139
+ t_eval,
140
+ t_batch_u,
141
+ ut_batch[:, :, :-1].permute(1, 0, 2)
142
+ ).permute(1, 0, 2)
143
+
144
+ # Create enhanced vector field using utility function
145
+ vector_field_func = create_vector_field_with_external_input(self.vector_field, u_interp, t_eval)
146
+
147
+ z_trajectory = torchdiffeq.odeint_adjoint(vector_field_func, z0, t_eval, method=self.config['ode_method'])
148
+
149
+ # decode all z
150
+ if infer_x:
151
+ x_trajectory = self.decoder([], z_trajectory.reshape(-1, latent_dim)).reshape(L, B, D)
152
+ else:
153
+ x_trajectory = None
154
+
155
+ return {
156
+ 'x0': x_batch[:, 0, :],
157
+ 't_batch': t_batch,
158
+ 't_batch_relative': t_batch_relative,
159
+ 't_eval': t_eval,
160
+ 'z_trajectory': z_trajectory,
161
+ 'x_trajectory': x_trajectory,
162
+ 'z_encoded_traj': z_encoded_traj,
163
+ 'u_interp': u_interp,
164
+ 'J_encoded': J_encoded ## for the isometry loss.
165
+ }
models/architectures/vanilla_ode.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Vanilla Neural ODE Model Architecture
3
+ """
4
+
5
+ import torch
6
+ import torch.optim as optim
7
+ import torchdiffeq
8
+ import itertools
9
+
10
+ from ..base import BaseModel
11
+ from ..components.mlp import MLPWithCustomInit
12
+ from ..components.initialization import init_dynamics_network
13
+ from ..utils import interpolate_external_input, create_vector_field_with_external_input, interpolate_trajectory
14
+
15
+
16
class VanillaODEModel(BaseModel):
    """Plain Neural ODE baseline: a single MLP vector field integrated in the
    observed state space (no encoder/decoder), with optional external input u
    concatenated to the state before each vector-field evaluation."""

    def __init__(self, config, device):
        """Build the vector-field MLP and its Adam optimizer from `config`.

        Required config keys: 'input_dim', 'hidden_dims_vector_field',
        'dynamics_init_scale', 'learning_rate'. Optional: 'external_input_dim'.
        """
        super().__init__(config, device)
        input_dim = config['input_dim']
        hidden_dims_vector_field = config['hidden_dims_vector_field']

        # External input support: the vector field consumes [x; u].
        external_input_dim = config.get('external_input_dim', 0)
        vector_field_input_dim = input_dim + external_input_dim

        # Create vector field (maps state(+input) back to a state derivative).
        self.vector_field = MLPWithCustomInit(
            vector_field_input_dim, hidden_dims_vector_field, input_dim
        ).to(device)
        init_dynamics_network(self.vector_field, scale=config['dynamics_init_scale'])

        self.optimizers = {
            'vector_field': optim.Adam(self.vector_field.parameters(), lr=config['learning_rate'])
        }

        if external_input_dim > 0:
            print(f"External input dimension: {external_input_dim}")
            print(f"Vector field input dimension: {vector_field_input_dim} (state + external)")
        else:
            print(f"Vector field input dimension: {vector_field_input_dim} (state {input_dim})")

        # Print architecture information (if supported by MLP implementation)
        if hasattr(self.vector_field, 'print_architecture'):
            self.vector_field.print_architecture("Vector Field")
        if hasattr(self.vector_field, 'get_layer_info'):
            config['vector_field_layer_info'] = self.vector_field.get_layer_info()

        total_params = sum(p.numel() for p in self.vector_field.parameters())
        print(f"Total parameters: {total_params}")

    def inference(self, xt_batch, ut_batch=None, infer_x=True):
        """Integrate the learned ODE from each batch element's initial state.

        Args:
            xt_batch: [B, L, D+1] trajectories; last channel is absolute time.
            ut_batch: optional [B, L_u, U+1] external input; last channel is
                its (absolute) time stamps.
            infer_x: unused here (kept for interface parity with hybrid models).

        Returns:
            dict with 'x0' [B, D], 't_batch'/'t_batch_relative' [B, L],
            't_eval' [T], 'x_trajectory' [T, B, D] and 'u_interp' (or None).
        """
        # xt_batch: [B, L, D+1]
        state_dim = self.config['input_dim']
        x_batch = xt_batch[:, :, :state_dim]
        t_batch = xt_batch[:, :, state_dim]
        B, L, D = x_batch.shape
        x0 = x_batch[:, 0, :]
        # Shift every trajectory so integration starts at t = 0.
        t_batch_relative = t_batch - t_batch[:, 0:1]
        max_time = torch.max(t_batch_relative[:, -1])
        # Shared evaluation grid covering the longest trajectory in the batch;
        # +1e-5 keeps the final sample inside the integration interval.
        t_eval = torch.linspace(0, max_time.item() + 1e-5, steps=L, device=self.device)

        # Handle external input using utility function
        u_interp = None
        if ut_batch is not None:  # NOTE(review): self.has_external_input check deliberately disabled here
            # u_interp = interpolate_external_input(t_eval, t_batch_relative, ut_batch, self.external_input_dim)
            t_batch_u = ut_batch[:, :, -1]
            t_batch_u = t_batch_u - t_batch[:, 0:1]
            # print(t_eval.shape, t_batch_u.shape, ut_batch[:, :, :-1].shape)

            u_interp = interpolate_trajectory(
                t_eval,  # [T]
                t_batch_u,  # [B, L_u] (already relative)
                ut_batch[:, :, :-1].permute(1, 0, 2)  # traj: [L_u, B, U]
            ).permute(1, 0, 2)  # NOTE(review): looks like [B, T, U] after this permute — confirm against interpolate_trajectory's return layout

        # Create enhanced vector field (closure that injects u(t) into calls).
        vector_field_func = create_vector_field_with_external_input(self.vector_field, u_interp, t_eval)

        x_trajectory = torchdiffeq.odeint(vector_field_func, x0, t_eval, method=self.config['ode_method'])

        return {
            'x0': x0,
            't_batch': t_batch,
            't_batch_relative': t_batch_relative,
            't_eval': t_eval,
            'x_trajectory': x_trajectory,
            'u_interp': u_interp,
        }
89
+
models/base.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Base Model Classes for Neural Hybrid Systems
3
+ """
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ import os
8
+
9
+
10
class BaseModel(nn.Module):
    """Common base for all neural hybrid-system models.

    Responsibilities:
      - hold ``config`` / ``device`` plus the optional external-input flags,
      - manage one optimizer per named component (``self.optimizers``),
      - save/load checkpoints in both the current and the legacy on-disk format.

    Subclasses populate ``self.optimizers`` and implement :meth:`inference`.
    """

    def __init__(self, config, device):
        super().__init__()
        self.config = config
        self.device = device
        # Mapping of component name -> torch.optim.Optimizer, filled by subclasses.
        self.optimizers = {}

        # Support for external inputs (control signal u fed to the dynamics).
        self.has_external_input = config.get('has_external_input', False)
        self.external_input_dim = config.get('external_input_dim', 0)

    # NOTE: intentionally shadows nn.Module.zero_grad — gradients are cleared
    # through the registered component optimizers instead.
    def zero_grad(self):
        """Zero gradients of every registered component optimizer."""
        for opt in self.optimizers.values():
            opt.zero_grad()

    def step(self):
        """Apply an optimization step with every registered component optimizer."""
        for opt in self.optimizers.values():
            opt.step()

    def step_component(self, component_name):
        """Step optimizer for a specific component only"""
        if component_name in self.optimizers:
            self.optimizers[component_name].step()
        else:
            raise ValueError(f"Component '{component_name}' not found in optimizers. Available: {list(self.optimizers.keys())}")

    def zero_grad_component(self, component_name):
        """Zero gradients for a specific component only"""
        if component_name in self.optimizers:
            self.optimizers[component_name].zero_grad()
        else:
            raise ValueError(f"Component '{component_name}' not found in optimizers. Available: {list(self.optimizers.keys())}")

    def inference(self, xt_batch, ut_batch=None):
        """
        Given xt_batch: [B, L, D+1] and optional ut_batch: [B, L, U+1], return dict with
        - x0: [B, D]
        - t_batch: [B, L]
        - flow: [T, B, D] (ODE solution in x or z space)
        - t_eval: [T]
        - z_encoded_traj: [B, L, latent_dim] (if applicable)
        """
        raise NotImplementedError

    def update(self, loss):
        """Backpropagate *loss* and step every component optimizer."""
        loss.backward()
        self.step()

    def save_checkpoint(self, filepath, epoch=None, step=None, best_loss=None, metadata=None):
        """Save model checkpoint with optimizers and metadata.

        Args:
            filepath: destination path; parent directories are created on demand.
            epoch, step, best_loss: optional training-progress markers.
            metadata: optional extra dict stored verbatim.
        """
        checkpoint = {
            'model_state_dict': self.state_dict(),
            'optimizers': {name: opt.state_dict() for name, opt in self.optimizers.items()},
            'config': self.config,
            'epoch': epoch,
            'step': step,
            'best_loss': best_loss,
            'metadata': metadata or {}
        }

        # os.path.dirname is '' for a bare filename and os.makedirs('') raises,
        # so only create directories when a directory component exists.
        dirname = os.path.dirname(filepath)
        if dirname:
            os.makedirs(dirname, exist_ok=True)
        torch.save(checkpoint, filepath)
        print(f"Checkpoint saved to: {filepath}")

    def _load_component_state(self, checkpoint, key, attr_name, display_name, strict):
        """Load one component's state dict from an old-format checkpoint.

        Returns the entry to record in the loaded-components list: ``attr_name``,
        or ``"{attr_name} (with mismatches)"`` when a size mismatch forced a
        retry with ``strict=False``; ``None`` when the component is absent from
        the checkpoint/model or failed to load for another reason.
        """
        if key not in checkpoint or not hasattr(self, attr_name):
            return None
        component = getattr(self, attr_name)
        try:
            component.load_state_dict(checkpoint[key], strict=strict)
            return attr_name
        except RuntimeError as e:
            if "size mismatch" in str(e):
                print(f"Warning: {display_name} dimension mismatch, using config dimensions")
                component.load_state_dict(checkpoint[key], strict=False)
                return f"{attr_name} (with mismatches)"
            print(f"Failed to load {attr_name}: {e}")
            return None

    def load_checkpoint(self, filepath, map_location=None, strict=True):
        """Load model checkpoint and restore optimizers.

        Handles both the current format (single 'model_state_dict') and the
        legacy format (per-component state dicts such as 'encoder',
        'vector_field', 'decoder', with optimizers under '<name>_optimizer').

        Args:
            filepath: checkpoint path.
            map_location: device mapping for ``torch.load`` (defaults to ``self.device``).
            strict: forwarded to ``load_state_dict``; on a size mismatch the
                load is retried with ``strict=False`` where the original flow allowed it.

        Returns:
            dict with 'epoch', 'step', 'best_loss', 'config' and 'metadata'
            (fields may be None for old checkpoints).

        Raises:
            FileNotFoundError: if *filepath* does not exist.
        """
        if not os.path.exists(filepath):
            raise FileNotFoundError(f"Checkpoint not found: {filepath}")

        if map_location is None:
            map_location = self.device

        # NOTE: checkpoints are trusted local artifacts; torch.load on
        # untrusted files is unsafe (pickle can execute arbitrary code).
        checkpoint = torch.load(filepath, map_location=map_location)

        # Handle different checkpoint formats
        if 'model_state_dict' in checkpoint:
            # New format: checkpoint saved with save_checkpoint()
            try:
                self.load_state_dict(checkpoint['model_state_dict'], strict=strict)
                print(f"Model state loaded from: {filepath} (new format)")
            except RuntimeError as e:
                if "size mismatch" in str(e) and not strict:
                    print(f"Warning: Dimension mismatch detected, loading with strict=False")
                    print(f"Using current config dimensions as ground truth")
                    self.load_state_dict(checkpoint['model_state_dict'], strict=False)
                    print(f"Model state loaded with dimension mismatches ignored")
                else:
                    raise e

            # Load optimizer states if available and optimizers exist
            if 'optimizers' in checkpoint and self.optimizers:
                for name, opt_state in checkpoint['optimizers'].items():
                    if name in self.optimizers:
                        self.optimizers[name].load_state_dict(opt_state)
                        print(f"Optimizer '{name}' state loaded")
                    else:
                        print(f"Warning: Optimizer '{name}' not found in current model, skipping")

            # Return metadata for trainer to use
            return {
                'epoch': checkpoint.get('epoch'),
                'step': checkpoint.get('step'),
                'best_loss': checkpoint.get('best_loss'),
                'config': checkpoint.get('config'),
                'metadata': checkpoint.get('metadata', {})
            }

        # Old format: component states saved separately (e.g. 'encoder', 'vector_field', 'decoder')
        print(f"Loading model from old format checkpoint: {filepath}")

        loaded_components = []
        # (checkpoint key, attribute name, display name used in warnings)
        component_specs = [
            ('encoder', 'encoder', 'Encoder'),
            ('vector_field', 'vector_field', 'Vector field'),
            ('decoder', 'decoder', 'Decoder'),
            ('ag_function', 'ag_function', 'ag_function'),
            ('rnn', 'rnn', 'rnn'),
            ('lstm', 'lstm', 'lstm'),
            ('output_layer', 'output_layer', 'output_layer'),
            ('event_function', 'event_function', 'event_function'),
            ('state_reset', 'state_reset', 'state_reset'),
        ]
        for key, attr_name, display_name in component_specs:
            entry = self._load_component_state(checkpoint, key, attr_name, display_name, strict)
            if entry is not None:
                loaded_components.append(entry)

        print(f"Loaded components: {loaded_components}")

        # Try to load optimizer states (old format: component_name_optimizer).
        # Strip the ' (with mismatches)' tag so those components still get
        # their optimizer state restored (previously such entries could never
        # match an optimizer key and were silently skipped).
        if self.optimizers:
            for entry in loaded_components:
                component_name = entry.split(' (')[0]
                opt_key = f"{component_name}_optimizer"
                if opt_key in checkpoint and component_name in self.optimizers:
                    self.optimizers[component_name].load_state_dict(checkpoint[opt_key])
                    print(f"Optimizer '{component_name}' state loaded")

        # Return metadata (old format may not have all fields; it stores
        # metadata under 'experiment_info').
        return {
            'epoch': checkpoint.get('epoch'),
            'step': checkpoint.get('step'),
            'best_loss': checkpoint.get('best_loss'),
            'config': checkpoint.get('config'),
            'metadata': checkpoint.get('experiment_info', {})
        }
models/components/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Model Components Package
3
+ """
4
+
5
+ from .mlp import MLPWithCustomInit
6
+ from .initialization import (
7
+ init_dynamics_network, init_autoencoder_network,
8
+ init_event_network, init_reset_network
9
+ )
10
+
11
+ __all__ = [
12
+ 'MLPWithCustomInit',
13
+ 'init_dynamics_network', 'init_autoencoder_network',
14
+ 'init_event_network', 'init_reset_network',
15
+ ]
models/components/initialization.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Network Initialization Functions
3
+ """
4
+
5
+ import torch.nn as nn
6
+
7
+
8
def init_dynamics_network(model, scale=0.01):
    """Re-initialize every linear layer of *model* in place for dynamics use.

    Weights are drawn from N(0, scale**2) and biases are zeroed, so the
    network starts close to the zero map (small initial dynamics).

    Args:
        model: module whose ``nn.Linear`` submodules are re-initialized.
        scale: standard deviation of the weight distribution.
    """
    linear_layers = (m for m in model.modules() if isinstance(m, nn.Linear))
    for layer in linear_layers:
        nn.init.normal_(layer.weight, mean=0.0, std=scale)
        if layer.bias is not None:
            nn.init.zeros_(layer.bias)
15
+
16
+
17
def init_autoencoder_network(model):
    """Xavier-initialize every linear layer of *model* in place.

    Uses the tanh gain (the autoencoders here use tanh-style activations) and
    zeroes all biases.
    """
    tanh_gain = nn.init.calculate_gain('tanh')
    for layer in model.modules():
        if not isinstance(layer, nn.Linear):
            continue
        nn.init.xavier_uniform_(layer.weight, gain=tanh_gain)
        if layer.bias is not None:
            nn.init.zeros_(layer.bias)
24
+
25
+
26
def init_event_network(model, scale=0.1):
    """
    Initialize an event-function network with a comparatively large variance.

    A wide initialization helps avoid runaway event boundaries: if the event
    function is never triggered there are no gradients through it and the
    model degenerates to a vanilla Neural ODE. Biases are also randomized
    (at half the weight scale) to encourage early event triggering.

    Args:
        model: event function network (modified in place).
        scale: standard deviation for the weights (default 0.1, larger than
            the dynamics-network initialization).
    """
    for layer in (m for m in model.modules() if isinstance(m, nn.Linear)):
        nn.init.normal_(layer.weight, mean=0.0, std=scale)
        if layer.bias is not None:
            # Small random bias rather than zero — see docstring.
            nn.init.normal_(layer.bias, mean=0.0, std=scale * 0.5)
44
+
45
+
46
def init_reset_network(model, scale=0.05):
    """
    Initialize a state-reset network with moderate variance.

    Reset maps should perturb the state meaningfully without being too
    aggressive, hence a standard deviation between the dynamics (0.01) and
    event (0.1) initializations. Biases are zeroed.

    Args:
        model: state reset network (modified in place).
        scale: standard deviation for the weight initialization.
    """
    for layer in (m for m in model.modules() if isinstance(m, nn.Linear)):
        nn.init.normal_(layer.weight, mean=0.0, std=scale)
        if layer.bias is not None:
            nn.init.zeros_(layer.bias)
models/components/mlp.py ADDED
@@ -0,0 +1,597 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Multi-Layer Perceptron with Custom Initialization
3
+ """
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ # --- Added activation registry with custom sine (supports autograd) ---
9
class SinActFunc(torch.autograd.Function):
    """Elementwise sine as an explicit autograd Function.

    Equivalent to ``torch.sin`` under autograd; kept custom so the backward
    rule (cosine) mirrors the manual-Jacobian path in ``act_prime``.
    """

    @staticmethod
    def forward(ctx, x):
        # Keep the input for backward: d/dx sin(x) = cos(x).
        ctx.save_for_backward(x)
        return torch.sin(x)

    @staticmethod
    def backward(ctx, grad_out):
        saved_input, = ctx.saved_tensors
        return grad_out * torch.cos(saved_input)
18
+
19
class Sine(nn.Module):
    """Sine activation layer.

    Thin ``nn.Module`` wrapper so :class:`SinActFunc` can sit inside an
    ``nn.Sequential`` like any other activation.
    """

    def forward(self, x):
        # Delegate to the custom Function (sin forward, cos backward).
        return SinActFunc.apply(x)
22
+
23
# Factories (not instances) so each lookup yields a fresh activation module.
ACTIVATION_REGISTRY = {
    'relu': lambda: nn.ReLU(),
    'tanh': lambda: nn.Tanh(),
    'sin': lambda: Sine(),
}


def get_activation(name: str):
    """Instantiate a fresh activation module by case-insensitive name.

    Raises:
        ValueError: if *name* is not a key of ``ACTIVATION_REGISTRY``.
    """
    key = name.lower()
    if key in ACTIVATION_REGISTRY:
        return ACTIVATION_REGISTRY[key]()
    raise ValueError(f"Unknown activation '{name}'. Available: {list(ACTIVATION_REGISTRY.keys())}")
34
+
35
+
36
+ def act_prime(mod, a):
37
+ """
38
+ Unified activation derivative for manual Jacobian.
39
+ a: pre-activation tensor.
40
+ """
41
+ if isinstance(mod, nn.ReLU):
42
+ return (a > 0).to(a.dtype)
43
+ if isinstance(mod, nn.LeakyReLU):
44
+ ns = getattr(mod, "negative_slope", 0.01)
45
+ return torch.where(a > 0, torch.ones_like(a), torch.full_like(a, ns))
46
+ if isinstance(mod, nn.Tanh):
47
+ ta = torch.tanh(a)
48
+ return 1.0 - ta * ta
49
+ if isinstance(mod, nn.Sigmoid):
50
+ s = torch.sigmoid(a)
51
+ return s * (1.0 - s)
52
+ if isinstance(mod, nn.Softplus):
53
+ beta = getattr(mod, "beta", 1.0)
54
+ return torch.sigmoid(beta * a)
55
+ if isinstance(mod, nn.SiLU): # Swish
56
+ s = torch.sigmoid(a)
57
+ return s * (1.0 + a * (1.0 - s))
58
+ if isinstance(mod, nn.ELU):
59
+ alpha = getattr(mod, "alpha", 1.0)
60
+ return torch.where(a > 0, torch.ones_like(a), alpha * torch.exp(a))
61
+ if isinstance(mod, nn.Identity):
62
+ return torch.ones_like(a)
63
+ if isinstance(mod, Sine):
64
+ return torch.cos(a)
65
+ raise NotImplementedError(f"Jacobian for activation {mod.__class__.__name__} not implemented.")
66
+
67
+ class MLPWithCustomInit(nn.Module):
68
+ def __init__(self, input_dim, hidden_dims, output_dim, activation=nn.ReLU, init_type='kaiming', activation_per_layer=None, dim_in_linear=[0, 0], dim_out_linear=0, hidden_dim_linear=[]):
69
+ super().__init__()
70
+ layers = []
71
+
72
+ # Store linear branch parameters
73
+ self.dim_in_linear = dim_in_linear
74
+ self.dim_out_linear = dim_out_linear
75
+ self.hidden_dim_linear = hidden_dim_linear
76
+
77
+ # Validate dim_in_linear format
78
+ if not isinstance(dim_in_linear, (list, tuple)) or len(dim_in_linear) != 2:
79
+ raise ValueError(f"dim_in_linear must be a list/tuple of length 2 [start_idx, end_idx], got {dim_in_linear}")
80
+
81
+ start_idx, end_idx = dim_in_linear
82
+ linear_input_size = end_idx - start_idx
83
+
84
+ # Create linear/MLP branch if specified
85
+ if linear_input_size > 0 and dim_out_linear > 0:
86
+ if start_idx < 0 or end_idx > input_dim or start_idx >= end_idx:
87
+ raise ValueError(f"Invalid dim_in_linear {dim_in_linear}: must satisfy 0 <= start < end <= {input_dim}")
88
+ if dim_out_linear > output_dim:
89
+ raise ValueError(f"dim_out_linear ({dim_out_linear}) cannot be larger than output_dim ({output_dim})")
90
+
91
+ # Create branch network (linear or MLP)
92
+ if len(hidden_dim_linear) == 0:
93
+ # Simple linear branch
94
+ self.linear_branch = nn.Linear(linear_input_size, dim_out_linear)
95
+ branch_type = 'linear_branch'
96
+ else:
97
+ # MLP branch
98
+ branch_layers = []
99
+ branch_dims = [linear_input_size] + hidden_dim_linear + [dim_out_linear]
100
+
101
+ for i, (in_dim, out_dim) in enumerate(zip(branch_dims[:-1], branch_dims[1:])):
102
+ branch_layers.append(nn.Linear(in_dim, out_dim))
103
+ # Add activation for all layers except the last one
104
+ if i < len(branch_dims) - 2:
105
+ branch_layers.append(activation())
106
+
107
+ self.linear_branch = nn.Sequential(*branch_layers)
108
+ branch_type = 'mlp_branch'
109
+
110
+ # MLP output dimension is total minus branch output
111
+ mlp_output_dim = output_dim - dim_out_linear
112
+ total_output_dim = output_dim
113
+ else:
114
+ self.linear_branch = None
115
+ mlp_output_dim = output_dim
116
+ total_output_dim = output_dim
117
+ linear_input_size = 0
118
+ branch_type = None
119
+
120
+ dims = [input_dim] + hidden_dims
121
+
122
+ # Record layer information for debugging/analysis
123
+ self.layer_info = {
124
+ 'input_dim': input_dim,
125
+ 'hidden_dims': hidden_dims,
126
+ 'output_dim': output_dim,
127
+ 'mlp_output_dim': mlp_output_dim,
128
+ 'total_output_dim': total_output_dim,
129
+ 'dim_in_linear': dim_in_linear,
130
+ 'linear_input_size': linear_input_size,
131
+ 'dim_out_linear': dim_out_linear,
132
+ 'hidden_dim_linear': hidden_dim_linear,
133
+ 'branch_type': branch_type,
134
+ 'total_layers': len(hidden_dims) + 1,
135
+ 'layer_details': []
136
+ }
137
+
138
+ # Add branch info if exists
139
+ if self.linear_branch is not None:
140
+ if branch_type == 'linear_branch':
141
+ # Simple linear branch
142
+ self.layer_info['layer_details'].append({
143
+ 'layer_idx': -1, # Special index for branch
144
+ 'type': 'linear_branch',
145
+ 'input_dim': linear_input_size,
146
+ 'input_slice': f"[{start_idx}:{end_idx}]",
147
+ 'output_dim': dim_out_linear,
148
+ 'activation': 'None',
149
+ 'parameters': linear_input_size * dim_out_linear + dim_out_linear
150
+ })
151
+ else:
152
+ # MLP branch - record each layer
153
+ branch_dims = [linear_input_size] + hidden_dim_linear + [dim_out_linear]
154
+ total_branch_params = 0
155
+ for i, (in_dim, out_dim) in enumerate(zip(branch_dims[:-1], branch_dims[1:])):
156
+ layer_params = in_dim * out_dim + out_dim
157
+ total_branch_params += layer_params
158
+
159
+ if i < len(branch_dims) - 2:
160
+ # Hidden layer in branch
161
+ self.layer_info['layer_details'].append({
162
+ 'layer_idx': f"b{i}", # Branch layer index
163
+ 'type': 'branch_hidden',
164
+ 'input_dim': in_dim,
165
+ 'output_dim': out_dim,
166
+ 'input_slice': f"[{start_idx}:{end_idx}]" if i == 0 else None,
167
+ 'activation': activation().__class__.__name__,
168
+ 'parameters': layer_params
169
+ })
170
+ else:
171
+ # Output layer in branch
172
+ self.layer_info['layer_details'].append({
173
+ 'layer_idx': f"b{i}",
174
+ 'type': 'branch_output',
175
+ 'input_dim': in_dim,
176
+ 'output_dim': out_dim,
177
+ 'input_slice': f"[{start_idx}:{end_idx}]" if i == 0 and len(branch_dims) == 2 else None,
178
+ 'activation': 'None',
179
+ 'parameters': layer_params
180
+ })
181
+
182
+ # Handle activation per layer
183
+ if activation_per_layer is None:
184
+ activation_fns = [activation() for _ in hidden_dims]
185
+ else:
186
+ if len(activation_per_layer) != len(hidden_dims):
187
+ raise ValueError(f"Number of activations ({len(activation_per_layer)}) must match number of hidden layers ({len(hidden_dims)})")
188
+ activation_fns = [get_activation(a) for a in activation_per_layer]
189
+
190
+ for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
191
+ layer_linear = nn.Linear(in_dim, out_dim)
192
+ activation_fn = activation_fns[i]
193
+ layers.append(layer_linear)
194
+ layers.append(activation_fn)
195
+ # Record layer details
196
+ self.layer_info['layer_details'].append({
197
+ 'layer_idx': i,
198
+ 'type': 'hidden',
199
+ 'input_dim': in_dim,
200
+ 'output_dim': out_dim,
201
+ 'activation': activation_fn.__class__.__name__,
202
+ 'parameters': in_dim * out_dim + out_dim
203
+ })
204
+
205
+ # Output layer - use mlp_output_dim instead of output_dim
206
+ output_layer = nn.Linear(dims[-1], mlp_output_dim)
207
+ layers.append(output_layer)
208
+
209
+ # Record output layer details
210
+ self.layer_info['layer_details'].append({
211
+ 'layer_idx': len(hidden_dims),
212
+ 'type': 'output',
213
+ 'input_dim': dims[-1],
214
+ 'output_dim': mlp_output_dim,
215
+ 'activation': 'None',
216
+ 'parameters': dims[-1] * mlp_output_dim + mlp_output_dim
217
+ })
218
+
219
+ # Calculate total parameters
220
+ self.layer_info['total_parameters'] = sum(detail['parameters'] for detail in self.layer_info['layer_details'])
221
+
222
+ self.net = nn.Sequential(*layers)
223
+ self.activation = activation
224
+ self.init_type = init_type
225
+ self.apply(self._init_weights)
226
+
227
+ def _init_weights(self, module):
228
+ if isinstance(module, nn.Linear):
229
+ if self.init_type == 'xavier':
230
+ if isinstance(self.activation(), (nn.Tanh, nn.ReLU)):
231
+ gain = nn.init.calculate_gain('tanh' if isinstance(self.activation(), nn.Tanh) else 'relu')
232
+ nn.init.xavier_uniform_(module.weight, gain=gain)
233
+ else:
234
+ nn.init.xavier_uniform_(module.weight)
235
+ elif self.init_type == 'kaiming':
236
+ nn.init.kaiming_uniform_(module.weight, nonlinearity='relu')
237
+ elif self.init_type == 'orthogonal':
238
+ nn.init.orthogonal_(module.weight, gain=1.0)
239
+ elif self.init_type == 'small_normal':
240
+ nn.init.normal_(module.weight, mean=0.0, std=0.01)
241
+ if module.bias is not None:
242
+ nn.init.zeros_(module.bias)
243
+
244
+ def _apply_network(self, x):
245
+ """Apply the complete network with linear branch if present"""
246
+ if self.linear_branch is not None:
247
+ # Extract slice from input for linear branch
248
+ start_idx, end_idx = self.dim_in_linear
249
+ x1 = x[..., start_idx:end_idx]
250
+ # Apply linear branch
251
+ linear_out = self.linear_branch(x1)
252
+ # Apply MLP to full input
253
+ mlp_out = self.net(x)
254
+ # Concatenate outputs: [linear_out; mlp_out]
255
+ return torch.cat([linear_out, mlp_out], dim=-1)
256
+ else:
257
+ # No linear branch, just apply MLP
258
+ return self.net(x)
259
+
260
+ def forward(self, t=None, x=None, u=None):
261
+ """
262
+ Forward pass - handle both (t, x) and single argument calls, plus external input u
263
+ IMPORTANT: This must maintain compatibility with:
264
+ 1. Hybrid models: encoder([], x_flat) and decoder([], z_flat)
265
+ 2. Vector fields in torchdiffeq: vector_field(t, z) or vector_field(t, z, u)
266
+ 3. Autoregressive models: ag_function(None, ag_input)
267
+ 4. NEW: External input conditioning: vector_field(t, z, u)
268
+ """
269
+ if x is None and u is None:
270
+ # Single argument call: forward(input)
271
+ # This handles autoregressive case: ag_function(ag_input)
272
+ return self._apply_network(t) # t is actually the input
273
+ elif u is None:
274
+ # Two argument call: forward(t, x)
275
+ # This handles hybrid case: encoder([], x_flat) where t=[] and x=x_flat
276
+ # And vector field case: vector_field(t, z) where we use z
277
+ return self._apply_network(x)
278
+ else:
279
+ # Three argument call: forward(t, x, u) - NEW for external conditioning
280
+ # Concatenate x and u for input to network
281
+ if isinstance(u, (int, float)):
282
+ # Scalar u, expand to match batch size
283
+ u_expanded = torch.full((x.shape[0], 1), u, device=x.device, dtype=x.dtype)
284
+ else:
285
+ u_expanded = u
286
+ xu_input = torch.cat([x, u_expanded], dim=-1)
287
+ return self._apply_network(xu_input)
288
+
289
    def forward_Jac(self, t=None, x=None, u=None):
        """
        Manual forward + Jacobian for the MLP with branch support.

        Input parsing mirrors :meth:`forward` (single-arg / (t, x) / (t, x, u)).
        Assumes the effective input is 2-D [B, Din] — TODO confirm for all callers.

        Returns:
            y: [B, total_out_dim] where total_out_dim = dim_out_linear + mlp_output_dim
            J: [B, total_out_dim, in_dim_eff], where in_dim_eff is the actual input fed to the network

        The Jacobian accounts for the branch structure: output = [branch(x1); MLP(x)]
        """
        # --- 1) Parse inputs to match the forward() calling conventions ---
        if x is None and u is None:
            inp = t  # single-argument call: t carries the data
        elif u is None:
            inp = x
        else:
            if isinstance(u, (int, float)):
                u = torch.full((x.shape[0], 1), u, device=x.device, dtype=x.dtype)
            inp = torch.cat([x, u], dim=-1)

        B, Din = inp.shape

        if self.linear_branch is not None:
            # Extract slice from input for branch
            start_idx, end_idx = self.dim_in_linear
            x1 = inp[..., start_idx:end_idx]

            # Apply branch network
            if len(self.hidden_dim_linear) == 0:
                # Simple linear branch: y_branch = A @ x1 + b, so its Jacobian
                # w.r.t. the sliced inputs is just the weight matrix.
                branch_out = self.linear_branch(x1)  # [B, dim_out_linear]

                # Construct Jacobian for linear branch (zero outside the slice)
                W_branch = self.linear_branch.weight  # [dim_out_linear, linear_input_size]
                J_branch = torch.zeros(B, self.dim_out_linear, Din, device=inp.device, dtype=inp.dtype)
                J_branch[:, :, start_idx:end_idx] = W_branch.unsqueeze(0).expand(B, -1, -1)
            else:
                # MLP branch - need to compute Jacobian through the branch network
                # by the chain rule: J <- W (first linear), J <- W @ J (later
                # linears), J <- act'(a) * J (activations).
                y_branch = x1
                J_branch = None
                is_first_branch = True
                a_cache_branch = None  # last pre-activation, consumed by act_prime

                # Propagate through branch layers
                # NOTE(review): assumes the branch Sequential alternates Linear
                # and activation modules, as built in __init__.
                for i, module in enumerate(self.linear_branch):
                    if isinstance(module, nn.Linear):
                        W = module.weight  # [out, in]
                        b = module.bias  # [out]
                        a = y_branch @ W.t() + b  # pre-activation

                        Wb = W.unsqueeze(0).expand(B, -1, -1)  # [B, out, in]
                        if is_first_branch:
                            J_branch = Wb  # [B, out, linear_input_size]
                            is_first_branch = False
                        else:
                            J_branch = torch.bmm(Wb, J_branch)  # [B, out, linear_input_size]

                        y_branch = a
                        a_cache_branch = a

                    else:
                        # Activation function in branch: elementwise chain rule
                        ap = act_prime(module, a_cache_branch)  # [B, dim]
                        y_branch = module(y_branch)
                        J_branch = ap.unsqueeze(-1) * J_branch

                branch_out = y_branch

                # Expand J_branch to full input dimensions (zero columns for
                # inputs outside the branch slice)
                J_branch_full = torch.zeros(B, self.dim_out_linear, Din, device=inp.device, dtype=inp.dtype)
                J_branch_full[:, :, start_idx:end_idx] = J_branch
                J_branch = J_branch_full

            # MLP trunk: apply full network to full input
            y_mlp = inp
            J_mlp = None
            is_first = True
            a_cache = None

            # Propagate through MLP layers (same chain-rule scheme as above)
            for module in self.net:
                if isinstance(module, nn.Linear):
                    W = module.weight  # [out, in]
                    b = module.bias  # [out]
                    a = y_mlp @ W.t() + b  # pre-activation

                    Wb = W.unsqueeze(0).expand(B, -1, -1)  # [B, out, in]
                    if is_first:
                        J_mlp = Wb  # [B, out, Din]
                        is_first = False
                    else:
                        J_mlp = torch.bmm(Wb, J_mlp)  # [B, out, Din]

                    y_mlp = a
                    a_cache = a

                else:
                    # Activation function
                    ap = act_prime(module, a_cache)  # [B, dim]
                    y_mlp = module(y_mlp)

                    # Defensive: if an activation came before any Linear, start
                    # from the identity Jacobian (net built in __init__ always
                    # starts with a Linear, so this path is normally unused).
                    if J_mlp is None:
                        I = torch.eye(Din, device=y_mlp.device, dtype=y_mlp.dtype)
                        J_mlp = I.unsqueeze(0).expand(B, Din, Din).clone()
                        is_first = False

                    J_mlp = ap.unsqueeze(-1) * J_mlp

            # Combine outputs and Jacobians (branch rows first, matching
            # _apply_network's output ordering)
            y = torch.cat([branch_out, y_mlp], dim=-1)  # [B, total_out_dim]
            J = torch.cat([J_branch, J_mlp], dim=1)  # [B, total_out_dim, Din]

        else:
            # No linear branch: propagate value and Jacobian through self.net only
            y = inp
            J = None
            is_first = True
            a_cache = None

            for module in self.net:
                if isinstance(module, nn.Linear):
                    W = module.weight
                    b = module.bias
                    a = y @ W.t() + b  # pre-activation

                    Wb = W.unsqueeze(0).expand(B, -1, -1)
                    if is_first:
                        J = Wb
                        is_first = False
                    else:
                        J = torch.bmm(Wb, J)

                    y = a
                    a_cache = a

                else:
                    # Activation: elementwise chain rule on the cached pre-activation
                    ap = act_prime(module, a_cache)
                    y = module(y)

                    # Defensive identity start (see note in the branch case)
                    if J is None:
                        I = torch.eye(Din, device=y.device, dtype=y.dtype)
                        J = I.unsqueeze(0).expand(B, Din, Din).clone()
                        is_first = False

                    J = ap.unsqueeze(-1) * J

        return y, J
435
+
436
def __call__(self, *args):
    """
    Dispatch on argument count so every calling convention used by the
    different model wrappers routes through the same network application.

    1 arg  -> autoregressive: apply the network to the input directly.
    2 args -> hybrid/ODE: first argument is time-like and ignored, the
              second holds the actual data.
    3 args -> external-input: concatenate state (arg 2) and control (arg 3)
              along the feature axis before applying the network.

    Raises:
        ValueError: for any other number of arguments.
    """
    n_args = len(args)
    if n_args == 1:
        return self._apply_network(args[0])
    if n_args == 2:
        return self._apply_network(args[1])
    if n_args == 3:
        x, u = args[1], args[2]
        if isinstance(u, (int, float)):
            # Broadcast a scalar control to a [B, 1] column on x's device/dtype.
            u_expanded = torch.full((x.shape[0], 1), u, device=x.device, dtype=x.dtype)
        else:
            u_expanded = u
        return self._apply_network(torch.cat([x, u_expanded], dim=-1))
    raise ValueError(f"Expected 1, 2, or 3 arguments, got {len(args)}")
458
+
459
def get_layer_info(self):
    """Expose the architecture metadata dict collected at construction time."""
    return self.layer_info
462
+
463
def print_architecture(self, name="MLP"):
    """Print a summary of the network architecture.

    Args:
        name: label used in the printed header (default "MLP").

    Reads only `self.layer_info` (dict built at construction) and, when a
    branch exists, `self.linear_branch` / `self.dim_in_linear`.
    """
    print(f"\n{name} Architecture:")
    print(f" Input dimension: {self.layer_info['input_dim']}")
    # Branch summary is only printed when a linear/MLP side branch was configured.
    if self.linear_branch is not None:
        start_idx, end_idx = self.dim_in_linear
        if self.layer_info['branch_type'] == 'linear_branch':
            print(f" Linear branch: input[{start_idx}:{end_idx}] ({self.layer_info['linear_input_size']}) -> {self.layer_info['dim_out_linear']}")
        else:
            # Non-linear branch: also show its hidden layer widths.
            print(f" MLP branch: input[{start_idx}:{end_idx}] ({self.layer_info['linear_input_size']}) -> {self.layer_info['hidden_dim_linear']} -> {self.layer_info['dim_out_linear']}")
    print(f" Total layers: {self.layer_info['total_layers']}")
    print(f" MLP output dimension: {self.layer_info['mlp_output_dim']}")
    print(f" Total output dimension: {self.layer_info['total_output_dim']}")
    print(f" Total parameters: {self.layer_info['total_parameters']}")
    print(" Layer details:")
    for detail in self.layer_info['layer_details']:
        # Branch layers carry 'branch' in their type tag and may record an input slice.
        if 'branch' in detail['type']:
            layer_type = detail['type'].replace('_', ' ').title()
            slice_info = f" {detail['input_slice']}" if detail.get('input_slice') else ""
            print(f" {layer_type} {detail['layer_idx']}: "
                  f"{detail['input_dim']}{slice_info} -> {detail['output_dim']}, "
                  f"activation: {detail['activation']}, "
                  f"params: {detail['parameters']}")
        else:
            print(f" Layer {detail['layer_idx']} ({detail['type']}): "
                  f"{detail['input_dim']} -> {detail['output_dim']}, "
                  f"activation: {detail['activation']}, "
                  f"params: {detail['parameters']}")
491
+
492
+
493
# --- Example usage (adapt constructor to your class) ---
if __name__ == "__main__":
    import time

    def _batch_jacobian_autograd(model, x, *, mode="x"):
        """
        Compute Jacobian batch-wise using torch.autograd.functional.jacobian
        Returns: J_auto [B, out_dim, in_dim]
        mode:
            - "x": calls model.forward(inp) where inp=x
            - "tx": calls model.forward(t=None, x=inp)
        """
        x = x.detach()
        B, Din = x.shape

        if mode == "x":
            def f_single(x_single):  # x_single: [Din]
                return model.forward(x_single.unsqueeze(0)).squeeze(0)  # [Dout]
        elif mode == "tx":
            def f_single(x_single):
                return model.forward(None, x_single.unsqueeze(0)).squeeze(0)
        else:
            raise NotImplementedError

        J_list = []
        for b in range(B):
            xb = x[b].clone().requires_grad_(True)
            Jb = torch.autograd.functional.jacobian(
                f_single, xb, create_graph=False, vectorize=True
            )  # [Dout, Din]
            J_list.append(Jb)
        return torch.stack(J_list, dim=0)  # [B, Dout, Din]

    @torch.no_grad()
    def check_manual_jacobian(model, x, *, atol=1e-6, rtol=1e-5, mode="x", verbose=True):
        """
        Compare manual Jacobian (forward_Jac) vs. autograd Jacobian.

        Args:
            model: your MLP instance (with .forward and .forward_Jac)
            x: [B, in_dim] input
            atol, rtol: tolerances for allclose
            mode: see _batch_jacobian_autograd; default assumes forward(x)

        Prints max abs diff and raises AssertionError if mismatch.
        """
        start_manual = time.time()
        y_man, J_man = model.forward_Jac(x) if mode == "x" else model.forward_Jac(None, x)
        end_manual = time.time()
        print(f"Manual Jacobian time: {end_manual - start_manual:.4f} seconds")

        # Warm-up run so the timed call below is not dominated by lazy initialization.
        J_auto = _batch_jacobian_autograd(model, x, mode=mode)

        start_auto = time.time()
        J_auto = _batch_jacobian_autograd(model, x, mode=mode)
        end_auto = time.time()
        print(f"Autograd Jacobian time: {end_auto - start_auto:.4f} seconds")

        if J_auto.shape != J_man.shape:
            raise RuntimeError(f"Shape mismatch: auto {J_auto.shape} vs manual {J_man.shape}")

        max_abs = (J_auto - J_man).abs().max().item()
        ok = torch.allclose(J_auto, J_man, atol=atol, rtol=rtol)

        if verbose:
            print(f"[Jacobian check] shape={tuple(J_man.shape)}, max|Δ|={max_abs:.3e}, "
                  f"allclose(atol={atol}, rtol={rtol})={ok}")

        assert ok, f"Jacobian mismatch: max|Δ|={max_abs:.3e} exceeds tolerances."

    torch.manual_seed(0)

    # Test with MLP branch using slice [1, 4] - total output dim is 7 (3 from branch + 4 from MLP)
    model = MLPWithCustomInit(6, [64] * 3, 7, dim_in_linear=[1, 4], dim_out_linear=3, hidden_dim_linear=[8, 6]).to('cuda')
    x = torch.randn(512, 6).to('cuda')
    _ = model(x)  # warmup
    print(f"Input shape: {x.shape}")
    print(f"Output shape: {model(x).shape}")
    model.print_architecture("MLP with MLP Branch (slice [1:4])")
    check_manual_jacobian(model, x, mode="x")

    # Test with linear branch (empty hidden_dim_linear)
    model_linear = MLPWithCustomInit(6, [64] * 3, 7, dim_in_linear=[1, 4], dim_out_linear=3, hidden_dim_linear=[]).to('cuda')
    model_linear.print_architecture("MLP with Linear Branch")
    check_manual_jacobian(model_linear, x, mode="x")

    # Test without branch (original behavior).
    # NOTE: this check previously appeared twice verbatim; the duplicate was removed.
    model_orig = MLPWithCustomInit(6, [64] * 3, 4).to('cuda')
    _ = model_orig(x)
    print(f"Original output shape: {model_orig(x).shape}")
    check_manual_jacobian(model_orig, x, mode="x")

    # Test with legacy int format (should work as [0, 2])
    model_legacy = MLPWithCustomInit(6, [64] * 3, 7, dim_in_linear=[0, 2], dim_out_linear=3).to('cuda')
    print(f"Legacy format dim_in_linear=2 -> {model_legacy.dim_in_linear}")
    model_legacy.print_architecture("MLP with Linear Branch (legacy format)")
models/factory.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Model Factory Functions
3
+ """
4
+
5
+ from .architectures.hybrid import HybridLatentODEModel
6
+ from .architectures.vanilla_ode import VanillaODEModel
7
+ from .architectures.event_ode import EventODEModel
8
+ from .architectures.augmented_ode import AugmentedODEModel
9
+ from .architectures.autoregressive import AutoregressiveModel
10
+
11
def create_model(model_type, config, device):
    """
    Factory function to create different types of models.

    Args:
        model_type: 'hybrid', 'vanilla_ode', 'event_ode', 'augmented_ode', 'autoregressive'
        config: Configuration dictionary
        device: PyTorch device

    Returns:
        model: Instance of BaseModel subclass

    Raises:
        ValueError: if model_type is not one of the supported keys.
    """
    # All model classes are already imported at module top; the previous
    # per-branch re-imports were redundant. A dispatch table keeps the
    # mapping in one place.
    registry = {
        'hybrid': HybridLatentODEModel,
        'vanilla_ode': VanillaODEModel,
        'event_ode': EventODEModel,
        'augmented_ode': AugmentedODEModel,
        'autoregressive': AutoregressiveModel,
    }
    model_cls = registry.get(model_type)
    if model_cls is None:
        raise ValueError(f"Unknown model type: {model_type}")
    return model_cls(config, device)
40
+
41
+
42
+ # Deprecated functions for backward compatibility
43
def create_hybrid_models(config, device):
    """Deprecated: Use create_model('hybrid', config, device) instead"""
    model = HybridLatentODEModel(config, device)
    # Re-expose the sub-modules in the legacy dict layout old callers expect.
    components = {
        'encoder': model.encoder,
        'vector_field': model.vector_field,
        'decoder': model.decoder,
    }
    return components, model.optimizers
53
+
54
+
55
def create_neural_ode_models(config, device):
    """Deprecated: Use create_model('vanilla_ode', config, device) instead"""
    model = VanillaODEModel(config, device)
    # Legacy layout: a single vector field and its optimizer.
    return ({'vector_field': model.vector_field},
            {'optimizer': model.optimizers['vector_field']})
61
+
62
+
63
def create_event_ode_models(config, device):
    """Deprecated: Use create_model('event_ode', config, device) instead"""
    model = EventODEModel(config, device)
    # Legacy layout: expose the three event-ODE components plus the shared optimizer.
    components = {
        'vector_field': model.vector_field,
        'event_function': model.event_function,
        'state_reset': model.state_reset,
    }
    return components, {'optimizer': model.optimizers['combined']}
models/utils.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Model Utility Functions
3
+ """
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+
9
def interpolate_trajectory(t_eval, t_query, traj, dim_batch=1):
    """
    Linearly interpolate trajectory (z or x) at t_query based on t_eval and traj.

    Args:
        t_eval: [T] time grid of ODE solution (assumed sorted ascending)
        t_query: [B, L] query times for each batch
        traj: [T, B, D] trajectory (z or x)
        dim_batch: batch dimension (default 1; kept for API compatibility, unused)
    Returns:
        interp: [B, L, D]

    Queries outside [t_eval[0], t_eval[-1]] are linearly extrapolated from the
    nearest segment.
    """
    B, L = t_query.shape
    T = t_eval.shape[0]
    # BUGFIX: with right=False, a query strictly inside (t_eval[i], t_eval[i+1])
    # returned index i+1, so interpolation used the segment to the RIGHT of the
    # query (wrong result for non-linear data). right=True followed by -1 yields
    # the left endpoint of the containing segment; queries exactly on a grid
    # point get weight 0 and return the grid value.
    indices_flat = torch.searchsorted(t_eval, t_query.flatten(), right=True) - 1
    indices = torch.clamp(indices_flat, 0, T - 2).reshape(B, L)
    t_left = t_eval[indices]
    t_right = t_eval[indices + 1]
    batch_indices = torch.arange(B, device=t_eval.device).unsqueeze(1).expand(-1, L)
    traj_left = traj[indices, batch_indices, :]
    traj_right = traj[indices + 1, batch_indices, :]
    # 1e-8 guards against division by zero on degenerate (repeated) grid points.
    weight = ((t_query - t_left) / (t_right - t_left + 1e-8)).unsqueeze(-1)
    interp = traj_left + weight * (traj_right - traj_left)
    return interp
32
+
33
+
34
def interpolate_external_input(t_eval, t_batch, ut_batch, external_input_dim):
    """
    Interpolate external input at t_eval times via the shared
    interpolate_trajectory helper.

    Args:
        t_eval: [T] evaluation times
        t_batch: [B, L] relative time batches (only the first batch's times
            are used; kept for API consistency)
        ut_batch: [B, L_u, U+1] external input batch with time as last channel
        external_input_dim: dimension U of the external input

    Returns:
        u_interp: [T, B, U] interpolated external inputs, or None if there is
        no external input.
    """
    if ut_batch is None or external_input_dim == 0:
        return None

    # Split control channels from the time channel packed in the last slot.
    u_batch = ut_batch[:, :, :external_input_dim]   # [B, L_u, U]
    t_u_batch = ut_batch[:, :, external_input_dim]  # [B, L_u]

    B, L_u, U = u_batch.shape
    T = len(t_eval)

    # Shortcut: use batch 0's time grid and assume every batch shares it —
    # TODO(review): confirm this assumption holds for all data loaders.
    t_u = t_u_batch[0, :] - t_batch[0]           # [L_u]
    t_query = t_eval.unsqueeze(0).expand(B, -1)  # [B, T]
    traj = u_batch.permute(1, 0, 2)              # [L_u, B, U]

    interp = interpolate_trajectory(t_u, t_query, traj)  # [B, T, U]

    # Reorder to the [T, B, U] layout consumed downstream.
    return interp.permute(1, 0, 2)
66
+
67
+
68
def create_vector_field_with_external_input(base_vector_field, u_interp, t_eval):
    """
    Universal wrapper to add external input to any vector field.

    NOTE(review): a later definition with the same name (the nn.Module-based
    one) shadows this closure variant at import time; this one uses a
    grid-index lookup of u (no interpolation between samples).

    Args:
        base_vector_field: Original vector field function
        u_interp: [T, B, U] interpolated external inputs (None if no external input)
        t_eval: [T] evaluation times

    Returns:
        Enhanced vector field function that includes external input
    """
    if u_interp is None:
        # Nothing to inject; hand back the original field untouched.
        return base_vector_field

    last_idx = len(t_eval) - 1

    def vector_field_with_input(t, state):
        # Snap t to the nearest grid index on its left-or-equal side and
        # forward the corresponding control sample.
        idx = torch.clamp(torch.searchsorted(t_eval, t, right=False), 0, last_idx)
        return base_vector_field(t, state, u_interp[idx, :, :])

    return vector_field_with_input
90
+
91
class VectorFieldWithInput(nn.Module):
    """
    Wrap a vector field so that a time-varying external input u(t), given as
    samples on a fixed time grid, is linearly interpolated and passed along.
    """

    def __init__(self, base_vector_field: nn.Module, u_interp, t_eval):
        """
        Args:
            base_vector_field: nn.Module/callable (t, state, u_t) -> dx/dt
            u_interp: [T, B, U] external inputs sampled on t_eval (None if no input)
            t_eval: [T] monotonically nondecreasing time grid matching u_interp
        """
        super().__init__()
        self.base_vector_field = base_vector_field
        # Registered as buffers so they follow the module across device moves.
        self.register_buffer("t_eval", t_eval)
        if u_interp is None:
            self.u_interp = None
        else:
            self.register_buffer("u_interp", u_interp)

    def forward(self, t, state):
        if self.u_interp is None:
            # No external input: delegate with the two-argument signature.
            return self.base_vector_field(t, state)

        grid = self.t_eval
        n = grid.shape[0]
        t_now = torch.as_tensor(t, dtype=grid.dtype, device=grid.device)

        if t_now <= grid[0]:
            # Constant extension below the grid.
            u_t = self.u_interp[0, :, :]  # [B, U]
        elif t_now >= grid[-1]:
            # Constant extension above the grid.
            u_t = self.u_interp[-1, :, :]
        else:
            # Locate the containing segment [lo, hi] and interpolate linearly;
            # 1e-8 guards against repeated grid points.
            hi = torch.clamp(torch.searchsorted(grid, t_now, right=False), 1, n - 1)
            lo = hi - 1
            frac = (t_now - grid[lo]) / (grid[hi] - grid[lo] + 1e-8)
            u_t = self.u_interp[lo, :, :] + frac * (self.u_interp[hi, :, :] - self.u_interp[lo, :, :])

        # Hand the interpolated control back to the base field (three-argument form).
        return self.base_vector_field(t, state, u_t)
135
+
136
+
137
def create_vector_field_with_external_input(base_vector_field, u_interp, t_eval):
    """Build the nn.Module wrapper that injects interpolated external input.

    This definition intentionally replaces the closure-based version above.
    """
    return VectorFieldWithInput(base_vector_field, u_interp, t_eval)
139
+
140
+ # def create_vector_field_with_external_input(base_vector_field, u_interp, t_eval):
141
+ # """
142
+ # Wrap a vector field to inject time-varying external input u(t) via linear interpolation.
143
+
144
+ # Args:
145
+ # base_vector_field: callable(t, state, u_t) -> dx/dt
146
+ # u_interp: [T, B, U] external inputs sampled on t_eval (None if no input)
147
+ # t_eval: [T] monotonically nondecreasing time grid corresponding to u_interp
148
+
149
+ # Returns:
150
+ # vector_field_with_input(t, state): calls base_vector_field with linearly
151
+ # interpolated u(t) aligned to t_eval.
152
+ # """
153
+ # if u_interp is None:
154
+ # return base_vector_field
155
+
156
+ # T = t_eval.shape[0]
157
+
158
+ # def vector_field_with_input(t, state):
159
+ # # Ensure t is a tensor on the same device/dtype as t_eval
160
+ # t_scalar = torch.as_tensor(t, dtype=t_eval.dtype, device=t_eval.device)
161
+
162
+ # # Clamp to endpoints: constant extension outside [t_eval[0], t_eval[-1]]
163
+ # if t_scalar <= t_eval[0]:
164
+ # u_t = u_interp[0, :, :] # [B, U]
165
+ # elif t_scalar >= t_eval[-1]:
166
+ # u_t = u_interp[-1, :, :] # [B, U]
167
+ # else:
168
+ # # Find the right interval [t_left, t_right] with t_left <= t < t_right
169
+ # idx_right = torch.searchsorted(t_eval, t_scalar, right=False)
170
+ # # Guarantee we have both neighbors
171
+ # idx_right = torch.clamp(idx_right, 1, T - 1)
172
+ # idx_left = idx_right - 1
173
+
174
+ # t_left = t_eval[idx_left]
175
+ # t_right = t_eval[idx_right]
176
+
177
+ # u_left = u_interp[idx_left, :, :] # [B, U]
178
+ # u_right = u_interp[idx_right, :, :] # [B, U]
179
+
180
+ # # Linear interpolation weight in [0,1]
181
+ # w = (t_scalar - t_left) / (t_right - t_left + 1e-8)
182
+
183
+ # # Linear interpolation of u(t)
184
+ # u_t = u_left + w * (u_right - u_left) # [B, U]
185
+
186
+ # # Call the original vector field with interpolated control
187
+ # return base_vector_field(t, state, u_t)
188
+
189
+ # return vector_field_with_input
mppi/__init__.py ADDED
File without changes
mppi/mppi/__init__.py ADDED
File without changes
mppi/mppi/mppi.py ADDED
@@ -0,0 +1,362 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import time
3
+ import typing
4
+
5
+ import torch
6
+ from torch.distributions.multivariate_normal import MultivariateNormal
7
+ from arm_pytorch_utilities import handle_batch_input
8
+ from functorch import vmap
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
+
13
def _ensure_non_zero(cost, beta, factor):
    """Exponentiate costs shifted by the batch minimum ``beta`` so the best
    sample maps to 1 and all weights stay strictly positive."""
    shifted = cost - beta
    return torch.exp(-factor * shifted)
15
+
16
+
17
class SpecificActionSampler:
    """Hook for injecting hand-crafted action trajectories into MPPI's sample
    batch. Subclass and override sample_trajectories (and optionally
    specific_dynamics) to encode trajectory priors or domain knowledge."""

    def __init__(self):
        # Index range this sampler's trajectories occupy inside the perturbed
        # action batch; set via register_sample_start_end by the controller.
        self.start_idx = 0
        self.end_idx = 0
        self.slice = slice(0, 0)

    def sample_trajectories(self, state, info):
        # Must be overridden: return explicit action trajectories for the
        # current state/context.
        raise NotImplementedError

    def specific_dynamics(self, next_state, state, action, t):
        """Handle dynamics in a specific way for the specific action sampler; defaults to using default dynamics"""
        return next_state

    def register_sample_start_end(self, start_idx, end_idx):
        # Called by MPPI so the sampler knows where its samples live in the batch.
        self.start_idx = start_idx
        self.end_idx = end_idx
        self.slice = slice(start_idx, end_idx)
34
+
35
class MPPI():
    """
    Model Predictive Path Integral control
    This implementation batch samples the trajectories and so scales well with the number of samples K.

    Implemented according to algorithm 2 in Williams et al., 2017
    'Information Theoretic MPC for Model-Based Reinforcement Learning',
    based off of https://github.com/ferreirafabio/mppi_pendulum
    """

    def __init__(self, dynamics, running_cost, nx, noise_sigma, num_samples=100, horizon=15, device="cpu",
                 terminal_state_cost=None,
                 lambda_=1.,
                 noise_mu=None,
                 u_min=None,
                 u_max=None,
                 u_init=None,
                 U_init=None,
                 u_scale=1,
                 u_per_command=1,
                 step_dependent_dynamics=False,
                 rollout_samples=1,
                 rollout_var_cost=0,
                 rollout_var_discount=0.95,
                 sample_null_action=False,
                 specific_action_sampler: typing.Optional[SpecificActionSampler] = None,
                 noise_abs_cost=False):
        """
        :param dynamics: function(state, action) -> next_state (K x nx) taking in batch state (K x nx) and action (K x nu)
        :param running_cost: function(state, action) -> cost (K) taking in batch state and action (same as dynamics)
        :param nx: state dimension
        :param noise_sigma: (nu x nu) control noise covariance (assume v_t ~ N(u_t, noise_sigma))
        :param num_samples: K, number of trajectories to sample
        :param horizon: T, length of each trajectory
        :param device: pytorch device
        :param terminal_state_cost: function(state) -> cost (K x 1) taking in batch state
        :param lambda_: temperature, positive scalar where larger values will allow more exploration
        :param noise_mu: (nu) control noise mean (used to bias control samples); defaults to zero mean
        :param u_min: (nu) minimum values for each dimension of control to pass into dynamics
        :param u_max: (nu) maximum values for each dimension of control to pass into dynamics
        :param u_init: (nu) what to initialize new end of trajectory control to be; defaults to zero
        :param U_init: (T x nu) initial control sequence; defaults to noise
        :param step_dependent_dynamics: whether the passed in dynamics needs horizon step passed in (as 3rd arg)
        :param rollout_samples: M, number of state trajectories to rollout for each control trajectory
            (should be 1 for deterministic dynamics and more for models that output a distribution)
        :param rollout_var_cost: Cost attached to the variance of costs across trajectory rollouts
        :param rollout_var_discount: Discount of variance cost over control horizon
        :param sample_null_action: Whether to explicitly sample a null action (bad for starting in a local minima)
        :param specific_action_sampler: Function to explicitly sample actions to use instead of sampling from noise from
            nominal trajectory, may output a number of action trajectories fewer than horizon
        :param noise_abs_cost: Whether to use the absolute value of the action noise to avoid bias when all states have the same cost
        """
        self.d = device
        # dtype is taken from the noise covariance so controls/costs stay consistent.
        self.dtype = noise_sigma.dtype
        self.K = num_samples  # N_SAMPLES
        self.T = horizon  # TIMESTEPS

        # dimensions of state and control
        self.nx = nx
        self.nu = 1 if len(noise_sigma.shape) == 0 else noise_sigma.shape[0]
        self.lambda_ = lambda_

        if noise_mu is None:
            noise_mu = torch.zeros(self.nu, dtype=self.dtype)

        if u_init is None:
            u_init = torch.zeros_like(noise_mu)

        # handle 1D edge case: keep mu as a vector and sigma as a 1x1 matrix
        if self.nu == 1:
            noise_mu = noise_mu.view(-1)
            noise_sigma = noise_sigma.view(-1, 1)

        # bounds
        self.u_min = u_min
        self.u_max = u_max
        self.u_scale = u_scale
        self.u_per_command = u_per_command
        # make sure if any of them is specified, both are specified
        # (the missing bound is assumed symmetric about zero)
        if self.u_max is not None and self.u_min is None:
            if not torch.is_tensor(self.u_max):
                self.u_max = torch.tensor(self.u_max)
            self.u_min = -self.u_max
        if self.u_min is not None and self.u_max is None:
            if not torch.is_tensor(self.u_min):
                self.u_min = torch.tensor(self.u_min)
            self.u_max = -self.u_min
        if self.u_min is not None:
            self.u_min = self.u_min.to(device=self.d)
            self.u_max = self.u_max.to(device=self.d)

        self.noise_mu = noise_mu.to(self.d)
        self.noise_sigma = noise_sigma.to(self.d)
        self.noise_sigma_inv = torch.inverse(self.noise_sigma)
        self.noise_dist = MultivariateNormal(self.noise_mu, covariance_matrix=self.noise_sigma)
        # T x nu control sequence (the nominal trajectory refined each step)
        self.U = U_init
        self.u_init = u_init.to(self.d)

        if self.U is None:
            self.U = self.noise_dist.sample((self.T,))

        self.step_dependency = step_dependent_dynamics
        self.F = dynamics
        self.running_cost = running_cost
        self.terminal_state_cost = terminal_state_cost
        self.sample_null_action = sample_null_action
        self.specific_action_sampler = specific_action_sampler
        self.noise_abs_cost = noise_abs_cost
        self.state = None
        self.info = None

        # handling dynamics models that output a distribution (take multiple trajectory samples)
        self.M = rollout_samples
        self.rollout_var_cost = rollout_var_cost
        self.rollout_var_discount = rollout_var_discount

        # sampled results from last command (kept for introspection/plotting)
        self.cost_total = None
        self.cost_total_non_zero = None
        self.omega = None
        self.states = None
        self.actions = None

    def get_params(self):
        # One-line summary of the controller configuration (newlines from
        # numpy array repr are flattened into commas).
        return f"K={self.K} T={self.T} M={self.M} lambda={self.lambda_} noise_mu={self.noise_mu.cpu().numpy()} noise_sigma={self.noise_sigma.cpu().numpy()}".replace(
            "\n", ",")

    @handle_batch_input(n=2)
    def _dynamics(self, state, u, t):
        # Pass the horizon step t through only when the dynamics asks for it.
        return self.F(state, u, t) if self.step_dependency else self.F(state, u)

    @handle_batch_input(n=2)
    def _running_cost(self, state, u, t):
        # Same step-dependency convention as _dynamics.
        return self.running_cost(state, u, t) if self.step_dependency else self.running_cost(state, u)

    def get_action_sequence(self):
        # Current nominal T x nu control sequence.
        return self.U

    def shift_nominal_trajectory(self):
        """
        Shift the nominal trajectory forward one step
        """
        # shift command 1 time step
        self.U = torch.roll(self.U, -1, dims=0)
        self.U[-1] = self.u_init

    def command(self, state, shift_nominal_trajectory=True, info=None):
        """
        :param state: (nx) or (K x nx) current state, or samples of states (for propagating a distribution of states)
        :param shift_nominal_trajectory: Whether to roll the nominal trajectory forward one step. This should be True
            if the command is to be executed. If the nominal trajectory is to be refined then it should be False.
        :param info: Optional dictionary to store context information
        :returns action: (nu) best action
        """
        self.info = info
        if shift_nominal_trajectory:
            self.shift_nominal_trajectory()

        return self._command(state)

    def _compute_weighting(self, cost_total):
        # Softmin weighting: subtract the best cost so exp never underflows to
        # all-zeros, then normalize to a probability vector omega.
        beta = torch.min(cost_total)
        self.cost_total_non_zero = _ensure_non_zero(cost_total, beta, 1 / self.lambda_)
        eta = torch.sum(self.cost_total_non_zero)
        self.omega = (1. / eta) * self.cost_total_non_zero
        return self.omega

    def _command(self, state):
        if not torch.is_tensor(state):
            state = torch.tensor(state)
        self.state = state.to(dtype=self.dtype, device=self.d)
        cost_total = self._compute_total_cost_batch()

        self._compute_weighting(cost_total)
        # Importance-weighted average of the sampled noise updates the nominal plan.
        perturbations = torch.sum(self.omega.view(-1, 1, 1) * self.noise, dim=0)

        self.U = self.U + perturbations
        action = self.get_action_sequence()[:self.u_per_command]
        # reduce dimensionality if we only need the first command
        if self.u_per_command == 1:
            action = action[0]
        return action

    def change_horizon(self, horizon):
        if horizon < self.U.shape[0]:
            # truncate trajectory
            self.U = self.U[:horizon]
        elif horizon > self.U.shape[0]:
            # extend with u_init
            self.U = torch.cat((self.U, self.u_init.repeat(horizon - self.U.shape[0], 1)))
        self.T = horizon

    def reset(self):
        """
        Clear controller state after finishing a trial
        """
        self.U = self.noise_dist.sample((self.T,))

    def _compute_rollout_costs(self, perturbed_actions):
        # perturbed_actions: K x T x nu; returns (cost K, states, actions).
        K, T, nu = perturbed_actions.shape
        assert nu == self.nu

        cost_total = torch.zeros(K, device=self.d, dtype=self.dtype)
        cost_samples = cost_total.repeat(self.M, 1)
        cost_var = torch.zeros_like(cost_total)

        # allow propagation of a sample of states (ex. to carry a distribution), or to start with a single state
        if self.state.shape == (K, self.nx):
            state = self.state
        else:
            state = self.state.view(1, -1).repeat(K, 1)

        # rollout action trajectory M times to estimate expected cost
        state = state.repeat(self.M, 1, 1)

        states = []
        actions = []
        for t in range(T):
            u = self.u_scale * perturbed_actions[:, t].repeat(self.M, 1, 1)
            next_state = self._dynamics(state, u, t)
            # potentially handle dynamics in a specific way for the specific action sampler
            next_state = self._sample_specific_dynamics(next_state, state, u, t)
            state = next_state
            c = self._running_cost(state, u, t)
            cost_samples = cost_samples + c
            if self.M > 1:
                # variance across the M rollouts, discounted over the horizon
                cost_var += c.var(dim=0) * (self.rollout_var_discount ** t)

            # Save total states/actions
            states.append(state)
            actions.append(u)

        # Actions is K x T x nu
        # States is K x T x nx
        actions = torch.stack(actions, dim=-2)
        states = torch.stack(states, dim=-2)

        # action perturbation cost
        if self.terminal_state_cost:
            c = self.terminal_state_cost(states, actions)
            cost_samples = cost_samples + c
        cost_total = cost_total + cost_samples.mean(dim=0)
        cost_total = cost_total + cost_var * self.rollout_var_cost
        return cost_total, states, actions

    def _compute_perturbed_action_and_noise(self):
        # parallelize sampling across trajectories
        # resample noise each time we take an action
        noise = self.noise_dist.rsample((self.K, self.T))
        # broadcast own control to noise over samples; now it's K x T x nu
        perturbed_action = self.U + noise
        perturbed_action = self._sample_specific_actions(perturbed_action)
        # naively bound control
        self.perturbed_action = self._bound_action(perturbed_action)
        # bounded noise after bounding (some got cut off, so we don't penalize that in action cost)
        self.noise = self.perturbed_action - self.U

    def _sample_specific_actions(self, perturbed_action):
        # specific sampling of actions (encoding trajectory prior and domain knowledge to create biases)
        i = 0
        if self.sample_null_action:
            perturbed_action[i] = 0
            i += 1
        if self.specific_action_sampler is not None:
            actions = self.specific_action_sampler.sample_trajectories(self.state, self.info)
            # check how long it is
            actions = actions.reshape(-1, self.T, self.nu)
            perturbed_action[i:i + actions.shape[0]] = actions
            self.specific_action_sampler.register_sample_start_end(i, i + actions.shape[0])
            i += actions.shape[0]
        return perturbed_action

    def _sample_specific_dynamics(self, next_state, state, u, t):
        # Let the custom sampler override dynamics for its own sample slice.
        if self.specific_action_sampler is not None:
            next_state = self.specific_action_sampler.specific_dynamics(next_state, state, u, t)
        return next_state

    def _compute_total_cost_batch(self):
        self._compute_perturbed_action_and_noise()
        if self.noise_abs_cost:
            action_cost = self.lambda_ * torch.abs(self.noise) @ self.noise_sigma_inv
            # NOTE: The original paper does self.lambda_ * torch.abs(self.noise) @ self.noise_sigma_inv, but this biases
            # the actions with low noise if all states have the same cost. With abs(noise) we prefer actions close to the
            # nomial trajectory.
        else:
            action_cost = self.lambda_ * self.noise @ self.noise_sigma_inv  # Like original paper

        rollout_cost, self.states, actions = self._compute_rollout_costs(self.perturbed_action)
        self.actions = actions / self.u_scale

        # action perturbation cost
        perturbation_cost = torch.sum(self.U * action_cost, dim=(1, 2))
        self.cost_total = rollout_cost + perturbation_cost
        return self.cost_total

    def _bound_action(self, action):
        # Element-wise clamp to [u_min, u_max] when bounds were supplied.
        if self.u_max is not None:
            return torch.max(torch.min(action, self.u_max), self.u_min)
        return action

    def _slice_control(self, t):
        # Flat-index slice for step t's nu control dimensions.
        return slice(t * self.nu, (t + 1) * self.nu)

    def get_rollouts(self, state, num_rollouts=1, U=None):
        """
        :param state: either (nx) vector or (num_rollouts x nx) for sampled initial states
        :param num_rollouts: Number of rollouts with same action sequence - for generating samples with stochastic
            dynamics
        :returns states: num_rollouts x T x nx vector of trajectories

        """
        state = state.view(-1, self.nx)
        if state.size(0) == 1:
            state = state.repeat(num_rollouts, 1)

        if U is None:
            U = self.get_action_sequence()
        T = U.shape[0]
        states = torch.zeros((num_rollouts, T + 1, self.nx), dtype=U.dtype, device=U.device)
        states[:, 0] = state
        for t in range(T):
            next_state = self._dynamics(states[:, t].view(num_rollouts, -1),
                                        self.u_scale * U[t].tile(num_rollouts, 1), t)
            # dynamics may augment state; here we just take the first nx dimensions
            states[:, t + 1] = next_state[:, :self.nx]

        return states[:, 1:]
mppi/task/__init__.py ADDED
File without changes
mppi/task/bb_track.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Hang
2
+ # Date: 2025-09-20
3
+ # Description: This file contains the implementation of the ball tracking task with MPPI and learned hybrid dynamics model.
4
+
5
+ import mujoco
6
+ import numpy as np
7
+ import argparse
8
+ from typing import Optional, Tuple, Dict, Any
9
+ import time
10
+ import os
11
+ from datetime import datetime
12
+ from dataclasses import dataclass
13
+ from scipy.spatial.transform import Rotation as R
14
+ from tqdm import tqdm
15
+ import pickle
16
+ import yaml
17
+ import torch
18
+ import glob
19
+
20
+
21
+ import sys
22
+ import os
23
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
24
+ from bb_collect import PingPongEnv, PingPongDummyController, Config
25
+ # from mppi.mppi import MPPI
26
+ from models import create_model, interpolate_trajectory
27
+
28
class NeuralHybridDynamics:
    """Load a trained hybrid dynamics model (config + weights) from a run directory.

    The run directory is expected to contain ``config.yaml`` (W&B export format)
    and one or more ``checkpoint_step_*.pth`` / ``best_model_step_*.pth`` files,
    either directly or under a ``files/`` subdirectory.
    """

    def __init__(self, weights_dir: str, device: str = "cpu"):
        self.weights_dir = weights_dir
        self.device = device
        self.config = None
        self.model = None
        self.checkpoint = None
        self.load_checkpoint()
        self.load_config()
        self.load_weights()

    def load_checkpoint(self):
        """Load the checkpoint with the largest step number from checkpoint_step_*.pth"""
        search_dirs = [self.weights_dir, os.path.join(self.weights_dir, "files")]
        checkpoint_files = []
        for search_dir in search_dirs:
            if os.path.exists(search_dir):
                checkpoint_files.extend(glob.glob(os.path.join(search_dir, "best_model_step_*.pth")))
                checkpoint_files.extend(glob.glob(os.path.join(search_dir, "checkpoint_step_*.pth")))
        if not checkpoint_files:
            raise FileNotFoundError("No checkpoint files found")

        def extract_step(path):
            # Filenames look like <prefix><step>[_<timestamp>].pth, e.g.
            # checkpoint_step_210000_20250920_144806.pth. The step is the FIRST
            # numeric token after the prefix. (BUG FIX: the previous version ran
            # int() on the whole "<step>_<timestamp>" tail, which always raised
            # ValueError and scored every file -1, making the selection arbitrary.)
            name = os.path.basename(path)
            for prefix in ("checkpoint_step_", "best_model_step_"):
                if name.startswith(prefix):
                    token = name[len(prefix):].split('.')[0].strip('_').split('_')[0]
                    try:
                        return int(token)
                    except ValueError:
                        return -1
            return -1

        # Prefer checkpoint_step_* files; otherwise fall back to the remaining
        # candidates. Both paths now select by numeric step (the old fallback
        # sorted lexicographically, so step 9 would beat step 10).
        ckpt_step_files = [f for f in checkpoint_files if os.path.basename(f).startswith("checkpoint_step_")]
        candidates = ckpt_step_files if ckpt_step_files else checkpoint_files
        checkpoint_path = max(candidates, key=extract_step)

        # NOTE(security): weights_only=False unpickles arbitrary Python objects;
        # only load checkpoints from trusted sources.
        self.checkpoint = torch.load(checkpoint_path, map_location=self.device, weights_only=False)
        print(f"\nCheckpoint from step {self.checkpoint.get('step', 'unknown')}")

    def load_config(self):
        """Load ALL parameters from files/config.yaml (no selective picking)."""
        config_path = os.path.join(self.weights_dir, "config.yaml")

        if not os.path.exists(config_path):
            raise FileNotFoundError(f"config.yaml not found at {config_path}")
        with open(config_path, "r") as f:
            raw = yaml.safe_load(f)

        # W&B exports wrap each entry as {key: {"value": ...}}; unwrap those
        # while passing any other entries through unchanged.
        flat = {}
        for k, v in raw.items():
            if isinstance(v, dict) and 'value' in v and len(v) == 1:
                flat[k] = v['value']
            else:
                flat[k] = v
        # Use ALL flattened keys directly as model config
        self.config = dict(flat)
        print(f"\nLoaded config from {config_path}")

    def load_weights(self):
        """Create and load the hybrid model (optionally using provided config)"""
        # Create model
        self.model = create_model('hybrid', self.config, self.device)

        def safe_load_state_dict(module, state_dict, name):
            """Load state dict with shape mismatch handling"""
            current_state = module.state_dict()
            filtered_state = {}

            # Keep only parameters whose shapes match the freshly created model.
            for key, value in state_dict.items():
                if key in current_state:
                    if current_state[key].shape == value.shape:
                        filtered_state[key] = value
                    else:
                        print(f" Shape mismatch in {name}.{key}: "
                              f"expected {current_state[key].shape}, got {value.shape}")

            module.load_state_dict(filtered_state, strict=False)
            print(f"Loaded {len(filtered_state)}/{len(state_dict)} parameters for {name}")

        # Load each component only when both the model attribute and the
        # checkpoint entry exist (consolidates three copy-pasted guards).
        for component in ('encoder', 'decoder', 'vector_field'):
            module = getattr(self.model, component, None)
            if module is not None and component in self.checkpoint:
                safe_load_state_dict(module, self.checkpoint[component], component)
            else:
                print(f"Skip loading {component}: missing in model or checkpoint")

        self.model.eval()
        print("Model loaded successfully")
139
+
140
+
141
+
142
class BBTrack:
    """Ball-tracking task: wires the environment, controller, and learned dynamics together."""

    def __init__(self, env: PingPongEnv):
        # Parsed CLI arguments (kept under the historical attribute name `parser`).
        self.parser = self.parse_bb_track_args()

        # self.env = env
        self.controller = None
        self.dynamics = NeuralHybridDynamics(self.parser.weights_dir)

    def parse_bb_track_args(self):
        """Parse command line arguments for data collection"""
        parser = argparse.ArgumentParser(description="Collect bouncing ball dataset")
        parser.add_argument("--steps", type=int, default=1000, help="Steps to run")
        parser.add_argument("--realtime", action="store_true", help="Match simulation speed to real time")
        parser.add_argument("--seed", type=int, default=42, help="Random seed")
        parser.add_argument("--headless", action="store_true", help="Run in headless mode (no rendering)")
        parser.add_argument("--weights_dir", type=str, default="/home/lau/sim/DynaTraj/logs/run-20250920_075505-k227j0rs", help="absolute path to the weights directory")

        return parser.parse_args()

    def load_dynamics(self):
        """(Re)load the learned hybrid dynamics model from the configured weights directory."""
        self.dynamics = NeuralHybridDynamics(self.parser.weights_dir)

    def run(self):
        """Run the demo loop driving the dummy ball-response controller."""
        args = self.parser
        # Set random seed
        np.random.seed(args.seed)
        config = Config()
        env = PingPongEnv(headless=args.headless, config=config)
        controller = PingPongDummyController(config=config)

        # Reset environment and controller
        obs = env.reset()
        controller.reset_controller()

        # BUG FIX: honor the --steps argument instead of a hard-coded 1000-step
        # loop (the default is still 1000, so behavior is unchanged by default).
        for step in range(args.steps):
            # Get action from ball_response controller
            action = controller.get_ball_response_action(env)

            obs, reward, done, info = env.step(action)

            if not args.headless:
                # Render every frame for smooth visualization
                env.render()
                if args.realtime:
                    time.sleep(env.model.opt.timestep)
            if done:
                obs = env.reset()  # Auto-reset if done
                controller.reset_controller()  # Reset controller state

        print("Demo simulation completed")
195
+
196
+
197
+ if __name__ == "__main__":
198
+ bb_track = BBTrack(env=None)