Hang917 committed on
Commit bf2a289
Parent(s): 5954bdb

Update: CEM works better; the controller can follow zero velocity when only the action penalty is active

logs/run-20250920_181129-yzvsgbbo/checkpoint_step_210000_20250920_213035.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c7adc63aab7b6b41c5db936d6f6e37843ff5035645f6f44ea58d364a89db5db
+size 1869433
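Note: the three added lines above are a Git LFS pointer, not the checkpoint itself; the ~1.9 MB binary is stored out-of-band and resolved on fetch (e.g. with `git lfs pull`). A minimal sketch of loading the resolved checkpoint, assuming the LFS object has been fetched; the checkpoint's internal layout is not shown in this commit:

import torch

# Assumes `git lfs pull` has replaced the 3-line pointer with the real .pth payload.
ckpt = torch.load(
    "logs/run-20250920_181129-yzvsgbbo/checkpoint_step_210000_20250920_213035.pth",
    map_location="cpu",
)
print(type(ckpt))  # typically a dict of state_dicts and training metadata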
logs/run-20250920_181129-yzvsgbbo/config.yaml ADDED
@@ -0,0 +1,238 @@
+_wandb:
+  value:
+    cli_version: 0.21.0
+    e:
+      cqtawjq7q302hpygmjq82o2rncafbv67:
+        args:
+          - --config
+          - ICLR/config/bb6_reduced/bb6_v2.yaml
+          - --gpu_id
+          - "3"
+        codePath: main_cheetah.py
+        codePathLocal: main_cheetah.py
+        cpu_count: 128
+        cpu_count_logical: 255
+        cudaVersion: "12.6"
+        disk:
+          /:
+            total: "6598647398400"
+            used: "2398938734592"
+        email: sangliteng@gmail.com
+        executable: /home/sangliteng/miniconda3/envs/learning-hybrid-systems/bin/python3
+        git:
+          commit: e65e9632c7a9d9bc1847e0a5a83e8e29db0ac56e
+          remote: git@github.com:SangliTeng/Leaning-Hybrid-Systems.git
+        gpu: NVIDIA RTX 6000 Ada Generation
+        gpu_count: 8
+        gpu_nvidia:
+          - architecture: Ada
+            cudaCores: 18176
+            memoryTotal: "51527024640"
+            name: NVIDIA RTX 6000 Ada Generation
+            uuid: GPU-45d30378-435b-de16-3aea-9fc48527fe61
+          - architecture: Ada
+            cudaCores: 18176
+            memoryTotal: "51527024640"
+            name: NVIDIA RTX 6000 Ada Generation
+            uuid: GPU-19a03a90-a9e0-a194-8d43-c2dcb7925140
+          - architecture: Ada
+            cudaCores: 18176
+            memoryTotal: "51527024640"
+            name: NVIDIA RTX 6000 Ada Generation
+            uuid: GPU-ea5b1c7d-baf5-6bcb-1ce1-0ee9ca4b5c8f
+          - architecture: Ada
+            cudaCores: 18176
+            memoryTotal: "51527024640"
+            name: NVIDIA RTX 6000 Ada Generation
+            uuid: GPU-b1a2e98c-e563-a0fe-47ce-cfa29028d5c7
+          - architecture: Ada
+            cudaCores: 18176
+            memoryTotal: "51527024640"
+            name: NVIDIA RTX 6000 Ada Generation
+            uuid: GPU-208eeaba-0174-d4e0-bc7a-2eb5f7983a6e
+          - architecture: Ada
+            cudaCores: 18176
+            memoryTotal: "51527024640"
+            name: NVIDIA RTX 6000 Ada Generation
+            uuid: GPU-81a0e787-8873-418d-6ff3-e3f59deb75a0
+          - architecture: Ada
+            cudaCores: 18176
+            memoryTotal: "51527024640"
+            name: NVIDIA RTX 6000 Ada Generation
+            uuid: GPU-8619099e-16b4-d667-b97f-518c3954df8c
+          - architecture: Ada
+            cudaCores: 18176
+            memoryTotal: "51527024640"
+            name: NVIDIA RTX 6000 Ada Generation
+            uuid: GPU-8042fac2-fd28-c8e9-668b-ceb33605fb49
+        host: hr-6000ada
+        memory:
+          total: "811164614656"
+        os: Linux-5.15.0-143-generic-x86_64-with-glibc2.35
+        program: /home/sangliteng/Research/Leaning-Hybrid-Systems/main_cheetah.py
+        python: CPython 3.12.11
+        root: ./ICLR/bb6_reduced
+        startedAt: "2025-09-20T18:11:29.437216Z"
+        writerId: cqtawjq7q302hpygmjq82o2rncafbv67
+    m: []
+    python_version: 3.12.11
+    t:
+      "1":
+        - 1
+      "2":
+        - 1
+      "3":
+        - 2
+        - 13
+        - 15
+        - 16
+      "4": 3.12.11
+      "5": 0.21.0
+      "12": 0.21.0
+      "13": linux-x86_64
+anti_collapse_weight:
+  value: 1000
+batch_size:
+  value: 4096
+data_path_test:
+  value: None
+data_path_train:
+  value: /home/sangliteng/Research/DynaTraj/dataset/bb/bb_ball_reduced.npz
+decoder_batch_size:
+  value: 131072
+decoder_finetune_steps:
+  value: 200000
+decoder_lr:
+  value: 0.01
+default_activation:
+  value: ReLU
+dim_linear_in_decoder:
+  value:
+    - 0
+    - 0
+dim_linear_in_encoder:
+  value:
+    - 0
+    - 0
+dim_linear_in_vec_field:
+  value:
+    - 0
+    - 0
+dim_linear_out_decoder:
+  value: 0
+dim_linear_out_encoder:
+  value: 0
+dim_linear_out_vec_field:
+  value: 0
+dynamics_init_scale:
+  value: 0.005
+dynamics_loss_type:
+  value: l2
+dynamics_weight:
+  value: 10
+encoder_lr:
+  value: 0.01
+eval_batch_size:
+  value: 64
+eval_every:
+  value: 2.5e+22
+eval_trajectory_length:
+  value: 500
+except_features:
+  value: []
+external_input_dim:
+  value: 6
+hidden_dim_linear_decoder:
+  value: []
+hidden_dim_linear_encoder:
+  value: []
+hidden_dim_linear_vec_field:
+  value: []
+hidden_dims_dec:
+  value:
+    - 128
+    - 128
+    - 128
+    - 128
+    - 128
+    - 128
+    - 128
+    - 128
+hidden_dims_enc:
+  value:
+    - 64
+    - 64
+    - 64
+hidden_dims_vector_field:
+  value:
+    - 128
+    - 128
+input_dim:
+  value: 12
+is_lagrangian_system:
+  value: true
+isometry_loss_weight:
+  value: 1
+latent_dim:
+  value: 18
+learning_rate:
+  value: 0.0005
+log_interval:
+  value: 50
+loss_mode:
+  value: z
+max_iso_samples:
+  value: 16384
+min_covariance_threshold:
+  value: 0.09
+model_type:
+  value: hybrid
+normalize_data:
+  value: false
+ode_method:
+  value: euler
+project_name:
+  value: debug architecture
+reconstruction_loss_type:
+  value: l2
+run_name:
+  value: bb reduced
+save_checkpoint_every:
+  value: 250
+smooth_budget:
+  value: 0.0001
+smooth_weight:
+  value: 0
+steps_per_length:
+  value: 2000
+switching_threshold_scale:
+  value: 1.5
+switching_weight_multiplier:
+  value: 2
+test_info:
+  value: same profile as bb, much larger network
+time_step:
+  value: 0.01
+train_test_ratio:
+  value: 0.95
+trajectory_lengths:
+  value:
+    - 80
+    - 150
+    - 200
+    - 200
+    - 200
+use_switching_weights:
+  value: true
+use_weight_smoothing:
+  value: false
+vector_field_lr:
+  value: 0.01
+viz_interval:
+  value: 50
+wandb_base_dir:
+  value: ./ICLR/bb6_reduced
+weight_smoothing_window:
+  value: 0
+z_continuity_weight:
+  value: 10
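For reference, a minimal sketch of reading such a wandb-style config.yaml back into a flat hyperparameter dict; the unwrapping of the per-key `value:` nesting is an assumption based on the structure above, and `load_wandb_config` is a hypothetical helper, not part of this repo:

import yaml

def load_wandb_config(path):
    # Each hyperparameter key wraps its setting in a nested "value:" entry;
    # "_wandb" holds run metadata rather than hyperparameters.
    with open(path) as f:
        raw = yaml.safe_load(f)
    return {k: v.get("value") if isinstance(v, dict) else v
            for k, v in raw.items() if k != "_wandb"}

cfg = load_wandb_config("logs/run-20250920_181129-yzvsgbbo/config.yaml")
print(cfg["latent_dim"], cfg["time_step"])  # 18 0.01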
mppi/mppi/mppi.py CHANGED
@@ -1,346 +1,274 @@
-import logging
-import time
-import typing
-
 import torch
-from torch.distributions.multivariate_normal import MultivariateNormal
-from functorch import vmap
-
-logger = logging.getLogger(__name__)
-
-
-def _ensure_non_zero(cost, beta, factor):
-    return torch.exp(-factor * (cost - beta))
-
-
-class SpecificActionSampler:
-    def __init__(self):
-        self.start_idx = 0
-        self.end_idx = 0
-        self.slice = slice(0, 0)
-
-    def sample_trajectories(self, state, info):
-        raise NotImplementedError
-
-    def specific_dynamics(self, next_state, state, action, t):
-        """Handle dynamics in a specific way for the specific action sampler; defaults to using default dynamics"""
-        return next_state
-
-    def register_sample_start_end(self, start_idx, end_idx):
-        self.start_idx = start_idx
-        self.end_idx = end_idx
-        self.slice = slice(start_idx, end_idx)
-
-class MPPI():
-    """
-    Model Predictive Path Integral control
-    This implementation batch samples the trajectories and so scales well with the number of samples K.
-
-    Implemented according to algorithm 2 in Williams et al., 2017
-    'Information Theoretic MPC for Model-Based Reinforcement Learning',
-    based off of https://github.com/ferreirafabio/mppi_pendulum
-    """
-
-    def __init__(self, dynamics, running_cost, nx, noise_sigma, num_samples=100, horizon=15, device="cpu",
-                 terminal_state_cost=None,
-                 lambda_=1,
-                 noise_mu=None,
-                 u_min=None,
-                 u_max=None,
-                 u_init=None,
-                 U_init=None,
-                 u_scale=1,
-                 u_per_command=1,
-                 step_dependent_dynamics=False,
-                 rollout_samples=1,
-                 rollout_var_cost=0,
-                 rollout_var_discount=0.95,
-                 sample_null_action=False,
-                 specific_action_sampler: typing.Optional[SpecificActionSampler] = None,
-                 noise_abs_cost=False):
-        """
-        :param dynamics: function(state, action) -> next_state (K x nx) taking in batch state (K x nx) and action (K x nu)
-        :param running_cost: function(state, action) -> cost (K) taking in batch state and action (same as dynamics)
-        :param nx: state dimension
-        :param noise_sigma: (nu x nu) control noise covariance (assume v_t ~ N(u_t, noise_sigma))
-        :param num_samples: K, number of trajectories to sample
-        :param horizon: T, length of each trajectory
-        :param device: pytorch device
-        :param terminal_state_cost: function(state) -> cost (K x 1) taking in batch state
-        :param lambda_: temperature, positive scalar where larger values will allow more exploration
-        :param noise_mu: (nu) control noise mean (used to bias control samples); defaults to zero mean
-        :param u_min: (nu) minimum values for each dimension of control to pass into dynamics
-        :param u_max: (nu) maximum values for each dimension of control to pass into dynamics
-        :param u_init: (nu) what to initialize new end of trajectory control to be; defaults to zero
-        :param U_init: (T x nu) initial control sequence; defaults to noise
-        :param step_dependent_dynamics: whether the passed in dynamics needs horizon step passed in (as 3rd arg)
-        :param rollout_samples: M, number of state trajectories to rollout for each control trajectory
-            (should be 1 for deterministic dynamics and more for models that output a distribution)
-        :param rollout_var_cost: Cost attached to the variance of costs across trajectory rollouts
-        :param rollout_var_discount: Discount of variance cost over control horizon
-        :param sample_null_action: Whether to explicitly sample a null action (bad for starting in a local minimum)
-        :param specific_action_sampler: Function to explicitly sample actions to use instead of sampling from noise from
-            nominal trajectory, may output a number of action trajectories fewer than horizon
-        :param noise_abs_cost: Whether to use the absolute value of the action noise to avoid bias when all states have the same cost
-        """
-        self.d = device
-        self.dtype = noise_sigma.dtype
-        self.K = num_samples  # N_SAMPLES
-        self.T = horizon  # TIMESTEPS
-
-        # dimensions of state and control
-        self.nx = nx
-        self.nu = 1 if len(noise_sigma.shape) == 0 else noise_sigma.shape[0]
-        self.lambda_ = lambda_
-
-        if noise_mu is None:
-            noise_mu = torch.zeros(self.nu, dtype=self.dtype)
-
-        if u_init is None:
-            u_init = torch.zeros_like(noise_mu)
-
-        # handle 1D edge case
-        if self.nu == 1:
-            noise_mu = noise_mu.view(-1)
-            noise_sigma = noise_sigma.view(-1, 1)
-
-        # bounds
-        self.u_min = u_min
-        self.u_max = u_max
-        self.u_scale = u_scale
-        self.u_per_command = u_per_command
-        # make sure if any of them is specified, both are specified
-        if self.u_max is not None and self.u_min is None:
-            if not torch.is_tensor(self.u_max):
-                self.u_max = torch.tensor(self.u_max)
-            self.u_min = -self.u_max
-        if self.u_min is not None and self.u_max is None:
-            if not torch.is_tensor(self.u_min):
-                self.u_min = torch.tensor(self.u_min)
-            self.u_max = -self.u_min
-        if self.u_min is not None:
-            self.u_min = self.u_min.to(device=self.d)
-            self.u_max = self.u_max.to(device=self.d)
-
-        self.noise_mu = noise_mu.to(self.d)
-        self.noise_sigma = noise_sigma.to(self.d)
-        self.noise_sigma_inv = torch.inverse(self.noise_sigma)
-        self.noise_dist = MultivariateNormal(self.noise_mu, covariance_matrix=self.noise_sigma)
-        # T x nu control sequence
-        self.U = U_init
-        self.u_init = u_init.to(self.d)
-
-        if self.U is None:
-            self.U = self.noise_dist.sample((self.T,))
-
-        self.step_dependency = step_dependent_dynamics
-        self.F = dynamics
-        self.running_cost = running_cost
-        self.terminal_state_cost = terminal_state_cost
-        self.sample_null_action = sample_null_action
-        self.specific_action_sampler = specific_action_sampler
-        self.noise_abs_cost = noise_abs_cost
-        self.state = None
-        self.info = None
-
-        # handling dynamics models that output a distribution (take multiple trajectory samples)
-        self.M = rollout_samples
-        self.rollout_var_cost = rollout_var_cost
-        self.rollout_var_discount = rollout_var_discount
-
-        # sampled results from last command
-        self.cost_total = None
-        self.cost_total_non_zero = None
-        self.omega = None
-        self.states = None
-        self.actions = None
-
-    def get_params(self):
-        return f"K={self.K} T={self.T} M={self.M} lambda={self.lambda_} noise_mu={self.noise_mu.cpu().numpy()} noise_sigma={self.noise_sigma.cpu().numpy()}".replace(
-            "\n", ",")
-
-    def _dynamics(self, state, u):
-        return self.F(state, u)
-
-    def _running_cost(self, state, u):
-        return self.running_cost(state, u)
-
-    def get_action_sequence(self):
-        return self.U
-
-    def shift_nominal_trajectory(self):
-        """
-        Shift the nominal trajectory forward one step
-        """
-        # shift command 1 time step
-        self.U = torch.roll(self.U, -1, dims=0)
-        self.U[-1] = self.u_init
-
-    def command(self, state, shift_nominal_trajectory=True, info=None):
-        """
-        :param state: (nx) or (K x nx) current state, or samples of states (for propagating a distribution of states)
-        :param shift_nominal_trajectory: Whether to roll the nominal trajectory forward one step. This should be True
-            if the command is to be executed. If the nominal trajectory is to be refined then it should be False.
-        :param info: Optional dictionary to store context information
-        :returns action: (nu) best action
-        """
-        self.info = info
-        if shift_nominal_trajectory:
-            self.shift_nominal_trajectory()
-
-        return self._command(state)
-
-    def _compute_weighting(self, cost_total):
-        beta = torch.min(cost_total)
-        self.cost_total_non_zero = _ensure_non_zero(cost_total, beta, 1 / self.lambda_)
-        eta = torch.sum(self.cost_total_non_zero)
-        self.omega = (1. / eta) * self.cost_total_non_zero
-        return self.omega
-
-    def _command(self, state):
-        if not torch.is_tensor(state):
-            state = torch.tensor(state)
-        self.state = state.to(dtype=self.dtype, device=self.d)
-        cost_total = self._compute_total_cost_batch()
-
-        self._compute_weighting(cost_total)
-        perturbations = torch.sum(self.omega.view(-1, 1, 1) * self.noise, dim=0)

-        self.U = self.U + perturbations
-        action = self.get_action_sequence()[:self.u_per_command]
-        # reduce dimensionality if we only need the first command
-        if self.u_per_command == 1:
-            action = action[0]
-        return action

-    def change_horizon(self, horizon):
-        if horizon < self.U.shape[0]:
-            # truncate trajectory
-            self.U = self.U[:horizon]
-        elif horizon > self.U.shape[0]:
-            # extend with u_init
-            self.U = torch.cat((self.U, self.u_init.repeat(horizon - self.U.shape[0], 1)))
-        self.T = horizon

-    def reset(self):
         """
-        Clear controller state after finishing a trial
         """
-        self.U = self.noise_dist.sample((self.T,))
-
-    def _compute_rollout_costs(self, perturbed_actions):
-        K, T, nu = perturbed_actions.shape
-        assert nu == self.nu
-
-        cost_total = torch.zeros(K, device=self.d, dtype=self.dtype)
-        cost_samples = cost_total.repeat(self.M, 1)
-        cost_var = torch.zeros_like(cost_total)
-
-        # allow propagation of a sample of states (ex. to carry a distribution), or to start with a single state
-        if self.state.shape == (K, self.nx):
-            state = self.state
-        else:
-            state = self.state.view(1, -1).repeat(K, 1)
-
-        # rollout action trajectory M times to estimate expected cost
-
-        actions = self.u_scale * perturbed_actions[:, :]
-        states = self._dynamics(state.unsqueeze(1), actions)
-        c = self._running_cost(states, actions)
-
-        # Actions is K x T x nu
-        # States is K x T x nx
-        # actions = torch.stack(actions, dim=-2)
-        # states = torch.stack(states, dim=-2)
-
-        # action perturbation cost
-        if self.terminal_state_cost:
-            c = self.terminal_state_cost(states, actions)
-        cost_samples = cost_samples + c
-        # cost_total = cost_total + cost_samples.mean(dim=0)
-        # cost_total = cost_total + cost_var * self.rollout_var_cost
-        cost_total = c.mean(dim=-1)
-        return cost_total, states, actions
-
-    def _compute_perturbed_action_and_noise(self):
-        # parallelize sampling across trajectories
-        # resample noise each time we take an action
-        noise = self.noise_dist.rsample((self.K, self.T))
-        # broadcast own control to noise over samples; now it's K x T x nu
-        perturbed_action = self.U + noise
-        perturbed_action = self._sample_specific_actions(perturbed_action)
-        # naively bound control
-        self.perturbed_action = self._bound_action(perturbed_action)
-        # bounded noise after bounding (some got cut off, so we don't penalize that in action cost)
-        self.noise = self.perturbed_action - self.U
-
-    def _sample_specific_actions(self, perturbed_action):
-        # specific sampling of actions (encoding trajectory prior and domain knowledge to create biases)
-        i = 0
-        if self.sample_null_action:
-            perturbed_action[i] = 0
-            i += 1
-        if self.specific_action_sampler is not None:
-            actions = self.specific_action_sampler.sample_trajectories(self.state, self.info)
-            # check how long it is
-            actions = actions.reshape(-1, self.T, self.nu)
-            perturbed_action[i:i + actions.shape[0]] = actions
-            self.specific_action_sampler.register_sample_start_end(i, i + actions.shape[0])
-            i += actions.shape[0]
-        return perturbed_action
-
-    def _sample_specific_dynamics(self, next_state, state, u, t):
-        if self.specific_action_sampler is not None:
-            next_state = self.specific_action_sampler.specific_dynamics(next_state, state, u, t)
-        return next_state
-
-    def _compute_total_cost_batch(self):
-        self._compute_perturbed_action_and_noise()
-        if self.noise_abs_cost:
-            action_cost = self.lambda_ * torch.abs(self.noise) @ self.noise_sigma_inv
-            # NOTE: The original paper does self.lambda_ * self.noise @ self.noise_sigma_inv, but this biases
-            # the actions with low noise if all states have the same cost. With abs(noise) we prefer actions close to the
-            # nominal trajectory.
-        else:
-            action_cost = self.lambda_ * self.noise @ self.noise_sigma_inv  # Like original paper
-
-        rollout_cost, self.states, actions = self._compute_rollout_costs(self.perturbed_action)
-        self.actions = actions / self.u_scale
-
-        # action perturbation cost
-        perturbation_cost = torch.sum(self.U * action_cost, dim=(1, 2))
-        self.cost_total = rollout_cost + perturbation_cost
-        return self.cost_total
-
-    def _bound_action(self, action):
-        if self.u_max is not None:
-            return torch.max(torch.min(action, self.u_max), self.u_min)
-        return action
-
-    def _slice_control(self, t):
-        return slice(t * self.nu, (t + 1) * self.nu)
-
-    def get_rollouts(self, state, num_rollouts=1, U=None):
         """
-        :param state: either (nx) vector or (num_rollouts x nx) for sampled initial states
-        :param num_rollouts: Number of rollouts with same action sequence - for generating samples with stochastic
-            dynamics
-        :returns states: num_rollouts x T x nx vector of trajectories
-
         """
-        state = state.view(-1, self.nx)
-        if state.size(0) == 1:
-            state = state.repeat(num_rollouts, 1)
-
-        if U is None:
-            U = self.get_action_sequence()
-        T = U.shape[0]
-        states = torch.zeros((num_rollouts, T + 1, self.nx), dtype=U.dtype, device=U.device)
-        states[:, 0] = state
-        for t in range(T):
-            next_state = self._dynamics(states[:, t].view(num_rollouts, -1),
-                                        self.u_scale * U[t].tile(num_rollouts, 1), t)
-            # dynamics may augment state; here we just take the first nx dimensions
-            states[:, t + 1] = next_state[:, :self.nx]
-
-        return states[:, 1:]
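For context on what is being deleted: the old MPPI class updates the nominal plan by softmin-weighting all K sampled perturbations by their trajectory cost (the `_ensure_non_zero` / `_compute_weighting` pair above). A standalone sketch of that update, with illustrative names rather than the class's internals:

import torch

def mppi_update(U, noise, cost_total, lambda_=1.0):
    # U: [T, nu] nominal controls; noise: [K, T, nu]; cost_total: [K]
    beta = cost_total.min()                            # baseline, cf. _compute_weighting
    omega = torch.exp(-(cost_total - beta) / lambda_)  # cf. _ensure_non_zero
    omega = omega / omega.sum()                        # normalize to weights
    return U + (omega.view(-1, 1, 1) * noise).sum(dim=0)

# Toy usage: K=64 samples of a T=10-step, nu=2 control problem
U_new = mppi_update(torch.zeros(10, 2), torch.randn(64, 10, 2), torch.rand(64))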
+import argparse
+import os
 import torch
+import yaml
+import numpy as np
+import datetime
+import matplotlib
+import sys
+import os
+import time
+
+
+
+class MPPIController:
+    """Placeholder MPPI controller for model predictive control."""
+
+    def __init__(self,
+                 action_dim: int,
+                 horizon: int = 10,
+                 num_samples: int = 1024,
+                 num_elites: int = 64,
+                 temperature: float = 1.0,
+                 max_std: float = 10.0,
+                 min_std: float = 0.1,
+                 iterations: int = 3,
+                 smoothness_weight: float = 0,
+                 device: str = 'cuda',):
+        self.action_dim = action_dim
+        self.horizon = horizon
+        self.num_samples = num_samples
+        self.temperature = temperature
+        self.max_std = max_std
+        self.min_std = min_std
+        self.device = device
+        self.iterations = iterations
+        self.num_elites = num_elites
+        self.smoothness_weight = smoothness_weight
+
+        # Pre-allocate all tensors for performance optimization
+        self._mean_buf = torch.zeros(self.horizon, self.action_dim, device=self.device)
+        self._last_mean_buf = torch.zeros(self.horizon, self.action_dim, device=self.device)
+        self._std_buf = torch.full((self.horizon, self.action_dim), self.max_std, dtype=torch.float, device=self.device)
+
+        # Pre-allocate tensors for MPPI iterations
+        self._actions_buffer = torch.empty(self.horizon, self.num_samples, self.action_dim, device=self.device)
+        self._actions_sample_buffer = torch.empty(self.horizon, self.num_samples, self.action_dim, device=self.device)
+        self._noise_buffer = torch.empty(self.horizon, self.num_samples, self.action_dim, device=self.device)
+        self._elite_actions_buffer = torch.empty(self.horizon, self.num_elites, self.action_dim, device=self.device)
+        self._elite_value_buffer = torch.empty(self.num_elites, 1, device=self.device)
+        self._elite_idxs_buffer = torch.empty(self.num_elites, dtype=torch.long, device=self.device)
+        self._score_buffer = torch.empty(self.num_elites, 1, device=self.device)
+        self._value_buffer = torch.empty(self.num_samples, 1, device=self.device)
+
+        # Pre-allocate tensors for working copies of mean and std
+        self._mean_work = torch.empty(self.horizon, self.action_dim, device=self.device)
+        self._std_work = torch.empty(self.horizon, self.action_dim, device=self.device)
+        self._iteration_std_buffer = torch.empty(self.horizon, self.action_dim, device=self.device)
+
+        # Pre-allocate tensors for smoothness penalty calculations
+        self._ref_penalty_buffer = torch.empty(self.horizon, self.num_samples, device=self.device)
+        self._action_diff_buffer = torch.empty(self.horizon - 1, self.num_samples, self.action_dim, device=self.device)
+        self._smoothness_penalty_buffer = torch.empty(self.horizon - 1, self.num_samples, device=self.device)
+        self._total_penalty_buffer = torch.empty(self.num_samples, 1, device=self.device)
+
+        # Pre-allocate tensors for weighted calculations
+        self._weighted_actions_buffer = torch.empty(self.horizon, self.num_elites, self.action_dim, device=self.device)
+        self._action_diff_squared_buffer = torch.empty(self.horizon, self.num_elites, self.action_dim, device=self.device)
+
+        # Pre-allocate constants for efficiency
+        self._ones_elites = torch.ones(self.num_elites, 1, device=self.device)
+        self._eps = torch.tensor(1e-9, device=self.device)
+        self._decay_factors = torch.tensor([0.8 ** i for i in range(self.iterations)], device=self.device)
+        self._discount = torch.tensor(0.99, device=self.device)
+
+        # Pre-allocate buffers for value estimation rollout
+        self._G_buffer = torch.empty(self.num_samples, 1, device=self.device)
+        self._termination_buffer = torch.empty(self.num_samples, 1, device=self.device)
+        self._discount_factor_buffer = torch.empty(self.num_samples, 1, device=self.device)
+        self._not_terminated_buffer = torch.empty(self.num_samples, device=self.device)
+        self._qvalue_pred_buffer = torch.empty(self.num_samples, device=self.device)
+        self._reward_pred_buffer = torch.empty(self.num_samples, device=self.device)
+
+        # Pre-allocate buffers for _maxq method
+        self._maxq_mean = torch.zeros(self.action_dim, device=self.device)
+        self._maxq_std = torch.full((self.action_dim,), self.max_std, dtype=torch.float, device=self.device)
+        self._maxq_noise = torch.empty(self.num_samples, self.action_dim, device=self.device)
+        self._maxq_actions_sample = torch.empty(self.num_samples, self.action_dim, device=self.device)
+        self._maxq_qvalues = torch.empty(self.num_samples, 1, device=self.device)
+        self._maxq_elite_qvalues = torch.empty(self.num_elites, 1, device=self.device)
+        self._maxq_elite_actions = torch.empty(self.num_elites, self.action_dim, device=self.device)
+        self._maxq_score = torch.empty(self.num_elites, device=self.device)
+        self._maxq_weighted_actions = torch.empty(self.num_elites, self.action_dim, device=self.device)
+        self._maxq_action_diff = torch.empty(self.num_elites, self.action_dim, device=self.device)
+
+        # Add step counter and save directory for Q-value visualization
+        self.step_count = 0
+
+        # Initialize action sequence with zeros (will be updated during planning)
+        self.action_sequence = torch.zeros(horizon, action_dim, device=device)
+
+        # Placeholders for dynamics and cost function (will be set externally)
+        self.dynamics = None
+        self.cost_function = None
+
+    def _plan(self, obs: torch.Tensor, ext_obs: torch.Tensor, t0: bool = False, ) -> torch.Tensor:
         """
+        Plan actions using MPPI with world model.
+
+        Args:
+            obs: Current observations [B, obs_dim]
+            ext_obs: External observations (same as obs for now)
+            t0: Whether this is the first timestep
+
+        Returns:
+            actions: Planned actions [B, action_dim]
         """
+        env_size = obs.shape[0]
+
+        for i in range(env_size):
+            # Use pre-allocated working copies instead of cloning
+            self._mean_work.copy_(self._mean_buf)
+            self._std_work.copy_(self._std_buf)
+            mean = self._mean_work
+            std = self._std_work
+
+            if not t0:
+                # Shift previous solution: [a1, a2, ..., aH] -> [a2, a3, ..., aH, 0]
+                if self.horizon > 1:
+                    mean[:-1].copy_(self._mean_buf[1:])
+                    mean[-1].zero_()
+                    std[:-1].copy_(self._std_buf[1:])
+                    std[-1].fill_(self.max_std)
+            else:
+                mean.copy_(self._mean_buf)
+                std.copy_(self._std_buf)
+
+            # Use pre-allocated actions buffer
+            actions = self._actions_buffer
+
+            # Iterate MPPI
+            for iter_idx in range(self.iterations):
+                # Gradually reduce sampling variance across iterations
+                decay_factor = self._decay_factors[iter_idx]
+                torch.mul(std, decay_factor, out=self._iteration_std_buffer)
+                self._iteration_std_buffer.clamp_(self.min_std, self.max_std)
+
+                # Sample actions with reduced std over iterations
+                if iter_idx == 0:
+                    # First iteration: fresh random sampling
+                    torch.randn(self.horizon, self.num_samples, self.action_dim,
+                                device=self.device, out=self._noise_buffer)
+                else:
+                    # Subsequent iterations: add smaller perturbations around current mean
+                    torch.randn(self.horizon, self.num_samples, self.action_dim,
+                                device=self.device, out=self._noise_buffer)
+                    self._noise_buffer *= 0.5
+
+                # Compute actions_sample in-place
+                torch.addcmul(mean.unsqueeze(1), self._iteration_std_buffer.unsqueeze(1),
+                              self._noise_buffer, out=self._actions_sample_buffer)
+                self._actions_sample_buffer.clamp_(-1, 1)
+                actions.copy_(self._actions_sample_buffer)
+
+                # Compute costs for all action samples
+                self._estimate_cost(obs[i:i+1], actions, mean, self._value_buffer)
+                self._value_buffer.nan_to_num_(0)
+
+                # Detach gradients before topk since we don't need gradients for MPPI
+                value_detached = self._value_buffer.detach()
+
+                # Get elite indices using pre-allocated buffer (note: we want minimum cost)
+                torch.topk(-value_detached.squeeze(1), self.num_elites, dim=0,
+                           out=(self._elite_value_buffer.squeeze(1), self._elite_idxs_buffer))
+
+                # Extract elite actions using advanced indexing into pre-allocated buffer
+                self._elite_actions_buffer.copy_(actions[:, self._elite_idxs_buffer].detach())
+                self._elite_value_buffer.copy_(self._value_buffer[self._elite_idxs_buffer].detach())
+
+                # Update parameters using pre-allocated buffers
+                min_value = self._elite_value_buffer.min()
+
+                # Compute scores in-place (lower cost = higher score)
+                torch.sub(min_value, self._elite_value_buffer, out=self._score_buffer)
+                self._score_buffer.mul_(self.temperature)
+                self._score_buffer.exp_()
+
+                # Normalize scores
+                score_sum = self._score_buffer.sum() + self._eps
+                self._score_buffer.div_(score_sum)
+
+                # Weighted average of elite actions using pre-allocated buffer
+                torch.mul(self._score_buffer.unsqueeze(0), self._elite_actions_buffer, out=self._weighted_actions_buffer)
+                torch.sum(self._weighted_actions_buffer, dim=1, out=mean)
+                self._last_mean_buf.copy_(mean)
+
+                # Compute std as weighted variance using pre-allocated buffers
+                torch.sub(self._elite_actions_buffer, mean.unsqueeze(1), out=self._action_diff_squared_buffer)
+                self._action_diff_squared_buffer.pow_(2)
+                torch.mul(self._score_buffer.unsqueeze(0), self._action_diff_squared_buffer,
+                          out=self._weighted_actions_buffer)
+                torch.sum(self._weighted_actions_buffer, dim=1, out=std)
+                std.sqrt_()
+                std.clamp_(self.min_std, self.max_std)
+
+            # Use mean directly for the first action
+            action_t = mean[0]
+
+            # Update buffers for next planning step
+            self._mean_buf.copy_(mean)
+            self._std_buf.copy_(std)
+
+            # TODO: for now just assume one env
+            break
+
+        # repeat action for each env
+        actions = action_t.repeat(env_size, 1)  # size [env_size, act_dims]
+
+        return actions
+
+    def _estimate_cost(self, obs, actions, mean, value_buffer) -> torch.Tensor:
         """
+        Estimate cost using dynamics rollout and cost function.
+
+        Args:
+            obs: Current observation [1, obs_dim]
+            actions: Action sequences [horizon, num_samples, action_dim]
+            mean: Current mean actions [horizon, action_dim]
+            value_buffer: Buffer to store costs [num_samples, 1]
+
+        Returns:
+            values: Costs for each action sequence [num_samples, 1]
         """
+        # Transpose actions to [num_samples, horizon, action_dim] for dynamics rollout
+        actions_transposed = actions.transpose(0, 1)  # [num_samples, horizon, action_dim]
+
+        # Expand initial state to match number of samples
+        state0_expanded = obs.expand(self.num_samples, -1).unsqueeze(1)  # [num_samples, 1, obs_dim]
+
+        # Rollout dynamics to get predicted states (detach to avoid gradients)
+        with torch.no_grad():
+            predicted_states = self.dynamics.rollout_steps(state0_expanded, actions_transposed)  # [num_samples, horizon, state_dim]
+
+        # Compute costs using the cost function
+        costs = self.cost_function(predicted_states.detach(), actions_transposed.detach())  # [num_samples, horizon]
+
+        # Sum costs over horizon to get total cost for each trajectory
+        total_costs = torch.sum(costs, dim=1, keepdim=True)  # [num_samples, 1]
+
+        # Add action smoothness penalty using pre-allocated buffers
+        smoothness_weight = self.smoothness_weight
+
+        # Penalty for deviation from reference (previous optimal) actions
+        torch.sub(actions, self._last_mean_buf.unsqueeze(1), out=self._actions_sample_buffer)
+        self._actions_sample_buffer.pow_(2)
+        torch.sum(self._actions_sample_buffer, dim=2, out=self._ref_penalty_buffer)
+        ref_penalty_sum = torch.sum(self._ref_penalty_buffer, dim=0, keepdim=True).T
+
+        # Penalty for action changes within the trajectory
+        torch.sub(actions[1:], actions[:-1], out=self._action_diff_buffer)
+        self._action_diff_buffer.pow_(2)
+        torch.sum(self._action_diff_buffer, dim=2, out=self._smoothness_penalty_buffer)
+        smoothness_penalty_sum = torch.sum(self._smoothness_penalty_buffer, dim=0, keepdim=True).T
+
+        # Combine penalties
+        torch.add(ref_penalty_sum, smoothness_penalty_sum, out=self._total_penalty_buffer)
+        self._total_penalty_buffer.mul_(smoothness_weight)
+
+        # Combine base cost with penalties
+        value_buffer.copy_(total_costs)
+        value_buffer.add_(self._total_penalty_buffer)
+
+        return value_buffer
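The "cem better" in the commit message refers to the elite-based refit above: instead of reweighting all num_samples rollouts as the deleted MPPI class did, the new planner keeps the num_elites lowest-cost samples and refits mean and std from them, cross-entropy-method style. A minimal sketch of that single refit step, with illustrative names in place of the pre-allocated buffers:

import torch

def cem_refit(actions, costs, num_elites=10, temperature=1.0):
    # actions: [horizon, num_samples, action_dim]; costs: [num_samples]
    elite_idx = torch.topk(-costs, num_elites).indices             # lowest costs
    elite_actions = actions[:, elite_idx]                          # [H, E, A]
    elite_costs = costs[elite_idx]
    score = torch.exp(temperature * (elite_costs.min() - elite_costs))
    score = (score / score.sum()).view(1, -1, 1)                   # softmin over elites
    mean = (score * elite_actions).sum(dim=1)                      # [H, A]
    std = ((score * (elite_actions - mean.unsqueeze(1)) ** 2).sum(dim=1)).sqrt()
    return mean, std

# Toy usage matching the task below: horizon 20, 1000 samples, 6-D actions
mean, std = cem_refit(torch.randn(20, 1000, 6), torch.rand(1000))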
mppi/task/bb_track.py CHANGED
@@ -22,7 +22,7 @@ import sys
 import os
 sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
 from bb_collect import PingPongEnv, PingPongDummyController, Config
-from mppi.mppi.mppi import MPPI
+from mppi.mppi.mppi import MPPIController
 from models import create_model, interpolate_trajectory

 class NeuralHybridDynamics:
@@ -171,7 +171,8 @@ class NeuralHybridDynamics:
         at_batch[:, 0, 1:] = action[:, 0, :]  # use only the first frame to init the encoder

         # Inference on the model's internal eval grid, then decode to x_trajectory
-        out = self.model.inference(xt_batch, at_batch, infer_x=True)
+        out = self.model.inference(xt_batch.to(torch.float32), at_batch.to(torch.float32), infer_x=True)
+
         t_eval = out.get('t_eval', t_batch)  # [B, Te] or [Te]
         x_traj = out['x_trajectory']  # [B, Te, Dx]

@@ -183,12 +184,13 @@ class BBTrack:
     def __init__(self):
         self.parser = self.parse_bb_track_args()
         self.control_dt = 1/50  # 50Hz
-        self.horizon = 4
-        self.iterations = 6
+        self.horizon = 20
+        self.iterations = 4
         self.state_dims = 33
-        self.num_samples = 100
+        self.num_samples = 1000
+        self.num_elites = 10
         self.device = "cuda:0"
-        self.noise_sigma = torch.diag(torch.tensor([1., 1., 1., 0.2, 0.2, 0.2], dtype=torch.float32, device=self.device))
+        self.noise_sigma = torch.diag(torch.tensor([1., 1., 1., 1, 1, 1], dtype=torch.float32, device=self.device))
        self.current_simulation_time = 0
         # Objective
         self.omega = 0.5
@@ -204,11 +206,21 @@ class BBTrack:
         self.dynamics = NeuralHybridDynamics(self.parser.weights_dir, self.control_dt, self.horizon, self.device)

         print("\n=======Loading Controller=======")
-        self.controller = MPPI(self.dynamics.rollout_steps, self.cost_function,
-                               self.state_dims, self.noise_sigma, self.num_samples, self.horizon, self.device,
-                               u_min = torch.tensor([-2,-2,-2, -0.2,-0.2,-0.2], device=self.device),
-                               u_max = torch.tensor([2,2,2, 0.2,0.2,0.2], device=self.device),
-                               step_dependent_dynamics=False,)
+        self.controller = MPPIController(
+            action_dim=6,  # 6D action: vx,vy,vz,wx,wy,wz
+            horizon=self.horizon,
+            num_samples=self.num_samples,
+            num_elites=self.num_elites,
+            temperature=1.0,
+            max_std=1.0,
+            min_std=0.1,
+            iterations=self.iterations,
+            smoothness_weight=0.1,
+            device=self.device
+        )
+        # Set dynamics and cost function for the controller
+        self.controller.dynamics = self.dynamics
+        self.controller.cost_function = self.cost_function

     def parse_bb_track_args(self):
         """Parse command line arguments for data collection"""
@@ -278,8 +290,8 @@ class BBTrack:
         control_cost = torch.sum(action ** 2, dim=-1)

         # Combine costs with weights
-        tracking_weight = 0.0
-        control_weight = 10
+        tracking_weight = 10
+        control_weight = 1

         total_cost = tracking_weight * pos_cost + control_weight * control_cost
@@ -300,11 +312,18 @@ class BBTrack:
         # update simulation time
         self.current_simulation_time = self.env.data.time

-        # Get action from mppi, control frequency 50hz, smaller than simulation frequency 1000hz
         if step % (1/self.control_dt) == 0:
-            # for i in range(self.iterations):
-            #     self.controller.command(obs, shift_nominal_trajectory=False)
-            action = self.controller.command(obs, shift_nominal_trajectory=True)
+            # Convert obs to torch tensor if needed
+            if isinstance(obs, np.ndarray):
+                obs_tensor = torch.from_numpy(obs).float().to(self.device).unsqueeze(0)  # Add batch dim
+            else:
+                obs_tensor = obs
+
+            # Call the MPPI controller
+            action_tensor = self.controller._plan(obs_tensor, obs_tensor, t0=(step==0))
+            action = action_tensor.cpu().numpy().flatten()  # Convert back to numpy
+

         obs, reward, done, info = self.env.step(action)