Hang917 committed on
Commit
c1dea15
·
1 Parent(s): e7f2648

UPDATE: traj visualize, sin cost

Browse files
Files changed (2) hide show
  1. mppi/mppi/mppi.py +1 -1
  2. mppi/task/bb1d_track.py +191 -73
mppi/mppi/mppi.py CHANGED
@@ -155,7 +155,7 @@ class MPPIController:
155
  # Subsequent iterations: add smaller perturbations around current mean
156
  torch.randn(self.horizon, self.num_samples, self.action_dim,
157
  device=self.device, out=self._noise_buffer)
158
- self._noise_buffer *= 0.5
159
 
160
  # Compute actions_sample in-place
161
  torch.addcmul(mean.unsqueeze(1), self._iteration_std_buffer.unsqueeze(1),
 
155
  # Subsequent iterations: add smaller perturbations around current mean
156
  torch.randn(self.horizon, self.num_samples, self.action_dim,
157
  device=self.device, out=self._noise_buffer)
158
+ self._noise_buffer *= 0.9
159
 
160
  # Compute actions_sample in-place
161
  torch.addcmul(mean.unsqueeze(1), self._iteration_std_buffer.unsqueeze(1),
mppi/task/bb1d_track.py CHANGED
@@ -102,7 +102,8 @@ class NeuralHybridDynamics:
102
  def load_weights(self):
103
  """Create and load the hybrid model (optionally using provided config)"""
104
  # Create model
105
- self.model = create_model('hybrid', self.config, self.device)
 
106
 
107
  # Load weights with shape checking
108
  def safe_load_state_dict(module, state_dict, name):
@@ -119,17 +120,19 @@ class NeuralHybridDynamics:
119
  f"expected {current_state[key].shape}, got {value.shape}")
120
 
121
  module.load_state_dict(filtered_state, strict=False)
 
 
122
 
123
- # Load each component (guarded)
124
- if hasattr(self.model, 'encoder') and (self.model.encoder is not None) and ('encoder' in self.checkpoint):
125
- safe_load_state_dict(self.model.encoder, self.checkpoint['encoder'], 'encoder')
126
- else:
127
- raise ValueError("Skip loading encoder: missing in model or checkpoint")
128
-
129
- if hasattr(self.model, 'decoder') and (getattr(self.model, 'decoder', None) is not None) and ('decoder' in self.checkpoint):
130
- safe_load_state_dict(self.model.decoder, self.checkpoint['decoder'], 'decoder')
131
- else:
132
- raise ValueError("Skip loading decoder: missing in model or checkpoint")
133
 
134
  if hasattr(self.model, 'vector_field') and (getattr(self.model, 'vector_field', None) is not None) and ('vector_field' in self.checkpoint):
135
  safe_load_state_dict(self.model.vector_field, self.checkpoint['vector_field'], 'vector_field')
@@ -175,8 +178,9 @@ class NeuralHybridDynamics:
175
 
176
  t_eval = out.get('t_eval', t_batch) # [B, Te] or [Te]
177
  x_traj = out['x_trajectory'] # [B, Te, Dx]
178
- z_traj_raw = out['z_trajectory'] # [Te, B, latent_dim] -> [20, 512, 4]
179
- self.z_traj = z_traj_raw.permute(1, 0, 2) # Convert to [B, Te, latent_dim] -> [512, 20, 4]
 
180
 
181
  # Interpolate from t_eval to our target sampling times
182
  x_pred = interpolate_trajectory(t_eval, t_batch - t_batch[:, :1], x_traj) # [B, H, Dx]
@@ -217,14 +221,14 @@ class BBTrack:
217
  self.horizon = 20
218
  self.iterations = 10
219
  self.state_dims = 4
220
- self.num_samples = 256
221
  self.num_elites = 16
222
  self.device = "cuda:0"
223
  self.current_simulation_time = 0
224
  # Objective
225
- self.omega = 2
226
- self.amplitude = 1
227
- self.offset = 0.8
228
  self.latent_cost = True
229
  np.random.seed(self.parser.seed)
230
 
@@ -232,6 +236,9 @@ class BBTrack:
232
  self.env_config = Config()
233
  self.env = PingPongEnv(headless=self.parser.headless, config=self.env_config)
234
 
 
 
 
235
  print("\n=======Loading Dynamics=======")
236
  self.dynamics = NeuralHybridDynamics(self.parser.weights_dir, self.control_dt, self.horizon, self.device)
237
 
@@ -255,44 +262,76 @@ class BBTrack:
255
  def parse_bb_track_args(self):
256
  """Parse command line arguments for data collection"""
257
  parser = argparse.ArgumentParser(description="Collect bouncing ball dataset")
258
- parser.add_argument("--steps", type=int, default=8000, help="Steps to run")
259
  parser.add_argument("--realtime", action="store_true", help="Match simulation speed to real time")
260
  parser.add_argument("--seed", type=int, default=42, help="Random seed")
261
  parser.add_argument("--headless", action="store_true", help="Run in headless mode (no rendering)")
 
262
  parser.add_argument("--weights_dir", type=str, default="/home/lau/sim/DynaTraj/logs/run-20250923_060650-u2cujbdh", help="absolute path to the weights directory")
 
263
 
264
  return parser.parse_args()
265
 
266
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
267
  def cost_function(self, state, action):
268
- '''
269
- cost function for bouncing ball tracking
270
- Objective: ball should track a sin wave trajectory
271
- x: 0
272
- y: 0
273
- z: abs(sin(t))
274
- Other cost:
275
- board velocity(x,y,z,wx,wy,wz) as control input should be small
276
- Input:
277
- state: [batch, horizon, state_dim]
278
- action: [batch, horizon, action_dim]
279
- t: in
280
- Output:
281
- cost: [batch, horizon]
282
-
283
- STATE:
284
- ball_pos, # ball position (3)
285
- ball_vel, # ball velocity (3)
286
- board_pos, # board position (3)
287
- board_euler, # board euler angles (3)
288
- board_vel, # board velocity (3)
289
- board_angvel # board angular velocity (3)
290
- ball_pos_relative, # ball position relative to board (3) ball_pos - board_pos
291
- ball_vel_relative, # ball velocity relative to board (3) ball_vel - board_vel
292
- board_euler, # board euler angles (3)
293
- board_vel, # board velocity (3)
294
- board_angvel # board angular velocity (3)
295
- '''
296
 
297
  if not self.latent_cost:
298
 
@@ -303,9 +342,8 @@ class BBTrack:
303
  board_vel = state[..., 3]
304
 
305
 
306
- # Target trajectory: x=0, y=0, z=abs(sin(t))
307
- target_x = 0.0
308
- target_y = 0.0
309
 
310
  # Calculate target trajectory for each time step in horizon
311
  batch_size, horizon_len = ball_pos.shape[:2]
@@ -314,30 +352,17 @@ class BBTrack:
314
  target_z = torch.abs(torch.sin(self.omega * future_times)) * self.amplitude + self.offset # [horizon]
315
  target_z = target_z.unsqueeze(0).expand(batch_size, -1) # [batch, horizon]
316
 
317
- target_z = torch.ones_like(target_z) * self.offset
 
 
 
318
 
319
- # print("Decoded:zt:",ball_pos[0,0])
320
- # print("target_z:",target_z[0,0])
321
 
322
  # Tracking cost - quadratic penalty for deviation from target
323
- pos_cost = (ball_pos - target_z) ** 2
324
- vel_cost = (ball_vel) ** 2 + (board_vel) ** 2
325
- # pos_cost = (board_pos - target_z) ** 2 *100 # TODO
326
-
327
-
328
- # Control cost - penalize large board velocities (action is 6D: vx,vy,vz,wx,wy,wz)
329
- control_cost = torch.sum(action ** 2, dim=-1)
330
-
331
- # Combine costs with weights
332
- tracking_weight = 10
333
- vel_weight = 1
334
- control_weight = 0
335
 
336
- total_cost = tracking_weight * pos_cost +\
337
- vel_weight * vel_cost +\
338
- control_weight * control_cost
339
-
340
- return total_cost
341
 
342
  if self.latent_cost:
343
  ball_pos = state[..., 0] # [batch, horizon]
@@ -347,7 +372,16 @@ class BBTrack:
347
 
348
  # Create target state for each time step
349
  target_state = state.clone()
350
- target_state[..., 0] = torch.ones_like(target_state[..., 0]) * self.offset
 
 
 
 
 
 
 
 
 
351
  target_state[..., 1] = torch.ones_like(target_state[..., 1]) * 0
352
  target_state[..., 2] = torch.ones_like(target_state[..., 2]) * self.offset
353
  target_state[..., 3] = torch.ones_like(target_state[..., 3]) * 0
@@ -360,6 +394,87 @@ class BBTrack:
360
  cost = torch.sum((target_z_traj - current_z_traj) ** 2, dim=-1)
361
 
362
  return cost
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
363
 
364
 
365
 
@@ -374,6 +489,9 @@ class BBTrack:
374
 
375
  # update simulation time
376
  self.current_simulation_time = self.env.data.time
 
 
 
377
 
378
  if step % (1/self.control_dt) == 0:
379
  # Convert obs to torch tensor if needed
@@ -382,10 +500,10 @@ class BBTrack:
382
  else:
383
  obs_tensor = obs
384
 
385
- print("Ball GT:zt:",obs_tensor[0,0])
386
- print("Board GT:zt:",obs_tensor[0,2]) # TODO
387
  target_z = torch.abs(torch.sin(self.omega * torch.tensor(self.current_simulation_time,device=self.device))) * self.amplitude + self.offset # [horizon]
388
- # print("target_z:",target_z)
389
 
390
  # Call the MPPI controller
391
  action_tensor = self.controller._plan(obs_tensor, obs_tensor, t0=(step==0))
 
102
  def load_weights(self):
103
  """Create and load the hybrid model (optionally using provided config)"""
104
  # Create model
105
+ self.model_type = self.config['model_type']
106
+ self.model = create_model(self.model_type, self.config, self.device)
107
 
108
  # Load weights with shape checking
109
  def safe_load_state_dict(module, state_dict, name):
 
120
  f"expected {current_state[key].shape}, got {value.shape}")
121
 
122
  module.load_state_dict(filtered_state, strict=False)
123
+
124
+ if self.model_type == 'hybrid':
125
 
126
+ # Load each component (guarded)
127
+ if hasattr(self.model, 'encoder') and (self.model.encoder is not None) and ('encoder' in self.checkpoint):
128
+ safe_load_state_dict(self.model.encoder, self.checkpoint['encoder'], 'encoder')
129
+ else:
130
+ raise ValueError("Skip loading encoder: missing in model or checkpoint")
131
+
132
+ if hasattr(self.model, 'decoder') and (getattr(self.model, 'decoder', None) is not None) and ('decoder' in self.checkpoint):
133
+ safe_load_state_dict(self.model.decoder, self.checkpoint['decoder'], 'decoder')
134
+ else:
135
+ raise ValueError("Skip loading decoder: missing in model or checkpoint")
136
 
137
  if hasattr(self.model, 'vector_field') and (getattr(self.model, 'vector_field', None) is not None) and ('vector_field' in self.checkpoint):
138
  safe_load_state_dict(self.model.vector_field, self.checkpoint['vector_field'], 'vector_field')
 
178
 
179
  t_eval = out.get('t_eval', t_batch) # [B, Te] or [Te]
180
  x_traj = out['x_trajectory'] # [B, Te, Dx]
181
+ if self.model_type == 'hybrid':
182
+ z_traj_raw = out['z_trajectory'] # [Te, B, latent_dim] -> [20, 512, 4]
183
+ self.z_traj = z_traj_raw.permute(1, 0, 2) # Convert to [B, Te, latent_dim] -> [512, 20, 4]
184
 
185
  # Interpolate from t_eval to our target sampling times
186
  x_pred = interpolate_trajectory(t_eval, t_batch - t_batch[:, :1], x_traj) # [B, H, Dx]
 
221
  self.horizon = 20
222
  self.iterations = 10
223
  self.state_dims = 4
224
+ self.num_samples = 512
225
  self.num_elites = 16
226
  self.device = "cuda:0"
227
  self.current_simulation_time = 0
228
  # Objective
229
+ self.omega = 5
230
+ self.amplitude = 0.4
231
+ self.offset = 0.6
232
  self.latent_cost = True
233
  np.random.seed(self.parser.seed)
234
 
 
236
  self.env_config = Config()
237
  self.env = PingPongEnv(headless=self.parser.headless, config=self.env_config)
238
 
239
+ # Add target visualization
240
+ self.setup_target_visualization()
241
+
242
  print("\n=======Loading Dynamics=======")
243
  self.dynamics = NeuralHybridDynamics(self.parser.weights_dir, self.control_dt, self.horizon, self.device)
244
 
 
262
  def parse_bb_track_args(self):
263
  """Parse command line arguments for data collection"""
264
  parser = argparse.ArgumentParser(description="Collect bouncing ball dataset")
265
+ parser.add_argument("--steps", type=int, default=16000, help="Steps to run")
266
  parser.add_argument("--realtime", action="store_true", help="Match simulation speed to real time")
267
  parser.add_argument("--seed", type=int, default=42, help="Random seed")
268
  parser.add_argument("--headless", action="store_true", help="Run in headless mode (no rendering)")
269
+ # parser.add_argument("--weights_dir", type=str, default="/home/lau/sim/DynaTraj/logs/run-20250923_164030-16e9sup5", help="absolute path to the weights directory") # ode
270
  parser.add_argument("--weights_dir", type=str, default="/home/lau/sim/DynaTraj/logs/run-20250923_060650-u2cujbdh", help="absolute path to the weights directory")
271
+
272
 
273
  return parser.parse_args()
274
 
275
 
276
+ # def cost_function(self, state, action):
277
+
278
+
279
+ # if not self.latent_cost:
280
+
281
+ # # Extract ball position from state (first 3 dimensions)
282
+ # ball_pos = state[..., 0] # [batch, 3] or [batch, horizon, 3]
283
+ # ball_vel = state[..., 1]
284
+ # board_pos = state[..., 2]
285
+ # board_vel = state[..., 3]
286
+
287
+
288
+ # target_state = state.clone()
289
+ # target_state[..., 0] = torch.ones_like(target_state[..., 0]) * self.offset
290
+ # target_state[..., 1] = torch.ones_like(target_state[..., 1]) * 0
291
+ # target_state[..., 2] = torch.ones_like(target_state[..., 2]) * self.offset
292
+ # target_state[..., 3] = torch.ones_like(target_state[..., 3]) * 0
293
+
294
+
295
+ # # Calculate target trajectory for each time step in horizon
296
+ # # batch_size, horizon_len = ball_pos.shape[:2]
297
+ # # time_steps = torch.arange(horizon_len, dtype=torch.float32, device=ball_pos.device)
298
+ # # future_times = self.current_simulation_time + time_steps * self.control_dt
299
+ # # target_z = torch.abs(torch.sin(self.omega * future_times)) * self.amplitude + self.offset # [horizon]
300
+ # # target_z = target_z.unsqueeze(0).expand(batch_size, -1) # [batch, horizon]
301
+
302
+ # # target_z = torch.ones_like(target_z) * self.offset
303
+
304
+ # # Tracking cost - quadratic penalty for deviation from target
305
+ # cost = torch.sum((target_state - state) ** 2, dim=-1)
306
+
307
+ # return cost
308
+
309
+ # if self.latent_cost:
310
+ # ball_pos = state[..., 0] # [batch, horizon]
311
+ # ball_vel = state[..., 1]
312
+ # board_pos = state[..., 2]
313
+ # board_vel = state[..., 3]
314
+
315
+ # # Create target state for each time step
316
+ # target_state = state.clone()
317
+ # target_state[..., 0] = torch.ones_like(target_state[..., 0]) * self.offset
318
+ # target_state[..., 1] = torch.ones_like(target_state[..., 1]) * 0
319
+ # target_state[..., 2] = torch.ones_like(target_state[..., 2]) * self.offset
320
+ # target_state[..., 3] = torch.ones_like(target_state[..., 3]) * 0
321
+
322
+ # # Batch encode target states directly
323
+ # target_z_traj = self.dynamics.encode_state(target_state) # [batch, horizon, latent_dim]
324
+ # current_z_traj = self.dynamics.z_traj # [batch, horizon, latent_dim]
325
+
326
+ # # Compute cost: [batch, horizon]
327
+ # cost = torch.sum((target_z_traj - current_z_traj) ** 2, dim=-1)
328
+
329
+ # return cost
330
+
331
+
332
+
333
  def cost_function(self, state, action):
334
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
335
 
336
  if not self.latent_cost:
337
 
 
342
  board_vel = state[..., 3]
343
 
344
 
345
+ target_state = state.clone()
346
+
 
347
 
348
  # Calculate target trajectory for each time step in horizon
349
  batch_size, horizon_len = ball_pos.shape[:2]
 
352
  target_z = torch.abs(torch.sin(self.omega * future_times)) * self.amplitude + self.offset # [horizon]
353
  target_z = target_z.unsqueeze(0).expand(batch_size, -1) # [batch, horizon]
354
 
355
+ target_state[..., 0] = target_z
356
+ target_state[..., 1] = torch.ones_like(target_state[..., 1]) * 0
357
+ target_state[..., 2] = torch.ones_like(target_state[..., 2]) * self.offset
358
+ target_state[..., 3] = torch.ones_like(target_state[..., 3]) * 0
359
 
360
+ # target_z = torch.ones_like(target_z) * self.offset
 
361
 
362
  # Tracking cost - quadratic penalty for deviation from target
363
+ cost = torch.sum((target_state - state) ** 2, dim=-1)
 
 
 
 
 
 
 
 
 
 
 
364
 
365
+ return cost
 
 
 
 
366
 
367
  if self.latent_cost:
368
  ball_pos = state[..., 0] # [batch, horizon]
 
372
 
373
  # Create target state for each time step
374
  target_state = state.clone()
375
+
376
+
377
+ # Calculate target trajectory for each time step in horizon
378
+ batch_size, horizon_len = ball_pos.shape[:2]
379
+ time_steps = torch.arange(horizon_len, dtype=torch.float32, device=ball_pos.device)
380
+ future_times = self.current_simulation_time + time_steps * self.control_dt
381
+ target_z = torch.abs(torch.sin(self.omega * future_times)) * self.amplitude + self.offset # [horizon]
382
+ target_z = target_z.unsqueeze(0).expand(batch_size, -1) # [batch, horizon]
383
+
384
+ target_state[..., 0] = target_z
385
  target_state[..., 1] = torch.ones_like(target_state[..., 1]) * 0
386
  target_state[..., 2] = torch.ones_like(target_state[..., 2]) * self.offset
387
  target_state[..., 3] = torch.ones_like(target_state[..., 3]) * 0
 
394
  cost = torch.sum((target_z_traj - current_z_traj) ** 2, dim=-1)
395
 
396
  return cost
397
+
398
+ def setup_target_visualization(self):
399
+ """Add a target sphere to the MuJoCo environment for visualization"""
400
+ if not self.parser.headless:
401
+ # Add a visual target sphere to the model as mocap body
402
+ target_xml = '''
403
+ <body name="target_sphere" pos="0 0 0.5" mocap="true">
404
+ <geom name="target_geom" type="sphere" size="0.03"
405
+ rgba="1 0 0 0.7" contype="0" conaffinity="0"/>
406
+ </body>
407
+ '''
408
+
409
+ # Parse the existing model XML and add the target sphere
410
+ import xml.etree.ElementTree as ET
411
+
412
+ # Get the current model XML string
413
+ model_xml = self.env.model_xml
414
+
415
+ # Parse XML
416
+ root = ET.fromstring(model_xml)
417
+ worldbody = root.find('worldbody')
418
+
419
+ # Parse and add the target sphere
420
+ target_element = ET.fromstring(target_xml)
421
+ worldbody.append(target_element)
422
+
423
+ # Recreate the model with target sphere
424
+ new_xml = ET.tostring(root, encoding='unicode')
425
+ self.env.model = mujoco.MjModel.from_xml_string(new_xml)
426
+ self.env.data = mujoco.MjData(self.env.model)
427
+
428
+ # Get the target body ID for updating position
429
+ self.target_body_id = mujoco.mj_name2id(self.env.model, mujoco.mjtObj.mjOBJ_BODY, "target_sphere")
430
+
431
+ # Recreate the renderer/viewer with new model
432
+ if hasattr(self.env, 'viewer') and self.env.viewer is not None:
433
+ self.env.viewer.close()
434
+ try:
435
+ import mujoco.viewer as mj_viewer
436
+ self.env.viewer = mj_viewer.launch_passive(self.env.model, self.env.data)
437
+ except ImportError:
438
+ pass
439
+
440
+ if hasattr(self.env, 'renderer') and self.env.renderer is not None:
441
+ self.env.renderer.close()
442
+ self.env.renderer = mujoco.Renderer(self.env.model,
443
+ height=self.env_config.video_height,
444
+ width=self.env_config.video_width)
445
+
446
+ def update_target_visualization(self):
447
+ """Update the target sphere position based on current trajectory"""
448
+ if not self.parser.headless and hasattr(self, 'target_body_id'):
449
+ target_pos = self.vis_tar_traj(self.env)
450
+
451
+ # Update target sphere position using mocap
452
+ if target_pos is not None:
453
+ target_pos_np = target_pos.cpu().numpy() if isinstance(target_pos, torch.Tensor) else target_pos
454
+ if target_pos_np.ndim > 1:
455
+ target_pos_np = target_pos_np.flatten()[:3]
456
+
457
+ # Use mocap to control the target sphere position
458
+ mocap_id = self.env.model.body_mocapid[self.target_body_id]
459
+ if mocap_id >= 0:
460
+ self.env.data.mocap_pos[mocap_id] = target_pos_np[:3]
461
+ # Debug: print every 50 steps to see if it's updating
462
+ if hasattr(self, '_debug_counter'):
463
+ self._debug_counter += 1
464
+ else:
465
+ self._debug_counter = 0
466
+
467
+ else:
468
+ print("Warning: Target sphere is not a mocap body")
469
+
470
+ def vis_tar_traj(self, env):
471
+ target_z = torch.abs(torch.sin(self.omega * torch.tensor(self.current_simulation_time,device=self.device))) * self.amplitude + self.offset
472
+ target_x = torch.ones_like(target_z) * 0
473
+ target_y = torch.ones_like(target_z) * 0
474
+ target_pos = torch.stack([target_x, target_y, target_z], dim=-1)
475
+ # visualize target trajectory
476
+ return target_pos
477
+
478
 
479
 
480
 
 
489
 
490
  # update simulation time
491
  self.current_simulation_time = self.env.data.time
492
+
493
+ # Update target visualization
494
+ self.update_target_visualization()
495
 
496
  if step % (1/self.control_dt) == 0:
497
  # Convert obs to torch tensor if needed
 
500
  else:
501
  obs_tensor = obs
502
 
503
+ # print("Ball GT:zt:",obs_tensor[0,0])
504
+ # print("Board GT:zt:",obs_tensor[0,2]) # TODO
505
  target_z = torch.abs(torch.sin(self.omega * torch.tensor(self.current_simulation_time,device=self.device))) * self.amplitude + self.offset # [horizon]
506
+ # print("Ball Tar:zt::",target_z)
507
 
508
  # Call the MPPI controller
509
  action_tensor = self.controller._plan(obs_tensor, obs_tensor, t0=(step==0))