Hang917 committed on
Commit e31c4ee · 1 Parent(s): 139b5a2

UPDATE: could stabilize the board height

logs/run-20250923_060650-u2cujbdh/checkpoint_step_212000_20250923_083937.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60e6a6b2c21092dfe3f57f8074a07d5f18b8120c91e45914e4d0ecdeef8ef797
+size 1788985
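These three lines are a Git LFS pointer, not the checkpoint itself; the ~1.8 MB binary is materialized by `git lfs pull`. A minimal sketch for inspecting the fetched file follows (the checkpoint's internal layout assumed below is typical for PyTorch training runs but is not confirmed by this commit):

# sketch: inspect the LFS-backed checkpoint once `git lfs pull` has run
# (the dict-of-tensors layout assumed here is an unverified assumption)
import torch

ckpt_path = "logs/run-20250923_060650-u2cujbdh/checkpoint_step_212000_20250923_083937.pth"
ckpt = torch.load(ckpt_path, map_location="cpu")
if isinstance(ckpt, dict):
    for key, val in ckpt.items():
        shape = getattr(val, "shape", None)
        print(key, shape if shape is not None else type(val).__name__)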
logs/run-20250923_060650-u2cujbdh/config.yaml ADDED
@@ -0,0 +1,243 @@
+_wandb:
+  value:
+    cli_version: 0.21.0
+    e:
+      thakdtmov0ddmmc4ymua4w9y4wntoxj3:
+        args:
+        - --config
+        - ICLR/config/bb2/bb2_switching_dim.yaml
+        - --gpu_id
+        - "7"
+        codePath: main_cheetah.py
+        codePathLocal: main_cheetah.py
+        cpu_count: 128
+        cpu_count_logical: 255
+        cudaVersion: "12.6"
+        disk:
+          /:
+            total: "6598647398400"
+            used: "2870406983680"
+        email: sangliteng@gmail.com
+        executable: /home/sangliteng/miniconda3/envs/learning-hybrid-systems/bin/python3
+        git:
+          commit: e65e9632c7a9d9bc1847e0a5a83e8e29db0ac56e
+          remote: git@github.com:SangliTeng/Leaning-Hybrid-Systems.git
+        gpu: NVIDIA RTX 6000 Ada Generation
+        gpu_count: 8
+        gpu_nvidia:
+        - architecture: Ada
+          cudaCores: 18176
+          memoryTotal: "51527024640"
+          name: NVIDIA RTX 6000 Ada Generation
+          uuid: GPU-45d30378-435b-de16-3aea-9fc48527fe61
+        - architecture: Ada
+          cudaCores: 18176
+          memoryTotal: "51527024640"
+          name: NVIDIA RTX 6000 Ada Generation
+          uuid: GPU-19a03a90-a9e0-a194-8d43-c2dcb7925140
+        - architecture: Ada
+          cudaCores: 18176
+          memoryTotal: "51527024640"
+          name: NVIDIA RTX 6000 Ada Generation
+          uuid: GPU-ea5b1c7d-baf5-6bcb-1ce1-0ee9ca4b5c8f
+        - architecture: Ada
+          cudaCores: 18176
+          memoryTotal: "51527024640"
+          name: NVIDIA RTX 6000 Ada Generation
+          uuid: GPU-b1a2e98c-e563-a0fe-47ce-cfa29028d5c7
+        - architecture: Ada
+          cudaCores: 18176
+          memoryTotal: "51527024640"
+          name: NVIDIA RTX 6000 Ada Generation
+          uuid: GPU-208eeaba-0174-d4e0-bc7a-2eb5f7983a6e
+        - architecture: Ada
+          cudaCores: 18176
+          memoryTotal: "51527024640"
+          name: NVIDIA RTX 6000 Ada Generation
+          uuid: GPU-81a0e787-8873-418d-6ff3-e3f59deb75a0
+        - architecture: Ada
+          cudaCores: 18176
+          memoryTotal: "51527024640"
+          name: NVIDIA RTX 6000 Ada Generation
+          uuid: GPU-8619099e-16b4-d667-b97f-518c3954df8c
+        - architecture: Ada
+          cudaCores: 18176
+          memoryTotal: "51527024640"
+          name: NVIDIA RTX 6000 Ada Generation
+          uuid: GPU-8042fac2-fd28-c8e9-668b-ceb33605fb49
+        host: hr-6000ada
+        memory:
+          total: "811164614656"
+        os: Linux-5.15.0-143-generic-x86_64-with-glibc2.35
+        program: /home/sangliteng/Research/Leaning-Hybrid-Systems/main_cheetah.py
+        python: CPython 3.12.11
+        root: ./ICLR/bb2
+        startedAt: "2025-09-23T06:06:50.598482Z"
+        writerId: thakdtmov0ddmmc4ymua4w9y4wntoxj3
+    m: []
+    python_version: 3.12.11
+    t:
+      "1":
+      - 1
+      "2":
+      - 1
+      "3":
+      - 2
+      - 13
+      - 15
+      - 16
+      "4": 3.12.11
+      "5": 0.21.0
+      "12": 0.21.0
+      "13": linux-x86_64
+anti_collapse_weight:
+  value: 1000
+batch_size:
+  value: 4096
+data_path_test:
+  value: None
+data_path_train:
+  value: /home/sangliteng/Research/DynaTraj/dataset/bb/bb2.npz
+decoder_batch_size:
+  value: 131072
+decoder_finetune_steps:
+  value: 200000
+decoder_lr:
+  value: 0.001
+default_activation:
+  value: ReLU
+dim_linear_in_decoder:
+  value:
+  - 0
+  - 0
+dim_linear_in_encoder:
+  value:
+  - 0
+  - 0
+dim_linear_in_vec_field:
+  value:
+  - 0
+  - 0
+dim_linear_out_decoder:
+  value: 0
+dim_linear_out_encoder:
+  value: 0
+dim_linear_out_vec_field:
+  value: 0
+dynamics_init_scale:
+  value: 0.005
+dynamics_loss_type:
+  value: l2
+dynamics_weight:
+  value: 10
+encoder_lr:
+  value: 0.001
+eval_batch_size:
+  value: 64
+eval_every:
+  value: 2.5e+22
+eval_trajectory_length:
+  value: 500
+except_features:
+  value: []
+external_input_dim:
+  value: 1
+hidden_dim_linear_decoder:
+  value: []
+hidden_dim_linear_encoder:
+  value: []
+hidden_dim_linear_vec_field:
+  value: []
+hidden_dims_dec:
+  value:
+  - 128
+  - 128
+  - 128
+  - 128
+  - 128
+  - 128
+  - 128
+  - 128
+hidden_dims_enc:
+  value:
+  - 64
+  - 64
+  - 64
+hidden_dims_vector_field:
+  value:
+  - 128
+  - 128
+input_dim:
+  value: 4
+is_lagrangian_system:
+  value: true
+isometry_loss_weight:
+  value: 0.1
+latent_dim:
+  value: 8
+learning_rate:
+  value: 0.0005
+log_interval:
+  value: 50
+loss_mode:
+  value: z
+max_iso_samples:
+  value: 16384
+min_covariance_threshold:
+  value: 0.09
+model_type:
+  value: hybrid
+normalize_data:
+  value: false
+ode_method:
+  value: rk4
+project_name:
+  value: debug architecture
+reconstruction_loss_type:
+  value: l2
+run_name:
+  value: bb2 - short horizon
+save_checkpoint_every:
+  value: 250
+smooth_budget:
+  value: 0.0001
+smooth_weight:
+  value: 0
+steps_per_length:
+  value: 2000
+switching_dim:
+  value:
+  - 0
+  - 1
+switching_threshold_scale:
+  value: 1.1
+switching_weight_multiplier:
+  value: 2
+test_info:
+  value: 0.1 iso loss weight
+time_step:
+  value: 0.01
+train_test_ratio:
+  value: 0.95
+trajectory_lengths:
+  value:
+  - 10
+  - 20
+  - 40
+  - 80
+  - 100
+  - 100
+use_switching_weights:
+  value: true
+use_weight_smoothing:
+  value: false
+vector_field_lr:
+  value: 0.001
+viz_interval:
+  value: 50
+wandb_base_dir:
+  value: ./ICLR/bb2
+weight_smoothing_window:
+  value: 0
+z_continuity_weight:
+  value: 10
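Format note: wandb's config.yaml wraps every run parameter in a nested value: key (with the _wandb block holding run metadata), so consumers usually unwrap that layer before use. A minimal sketch, assuming PyYAML is available; load_wandb_config is a hypothetical helper, not part of this repo:

# sketch: unwrap a wandb-style config.yaml into a flat dict
# (load_wandb_config is a hypothetical helper, not part of this repo)
import yaml

def load_wandb_config(path):
    with open(path) as f:
        raw = yaml.safe_load(f)
    # wandb stores every parameter as {key: {"value": ...}}; unwrap that
    # layer and drop the internal "_wandb" bookkeeping block.
    return {
        k: v["value"]
        for k, v in raw.items()
        if k != "_wandb" and isinstance(v, dict) and "value" in v
    }

cfg = load_wandb_config("logs/run-20250923_060650-u2cujbdh/config.yaml")
print(cfg["latent_dim"], cfg["ode_method"])  # 8, rk4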
mppi/task/bb1d_track.py CHANGED
@@ -177,6 +177,7 @@ class NeuralHybridDynamics:
 
         t_eval = out.get('t_eval', t_batch)  # [B, Te] or [Te]
         x_traj = out['x_trajectory']  # [B, Te, Dx]
+        z_traj = out['z_trajectory']  # TODO
 
         # Interpolate from t_eval to our target sampling times
         x_pred = interpolate_trajectory(t_eval, t_batch - t_batch[:, :1], x_traj)  # [B, H, Dx]
@@ -196,7 +197,7 @@ class BBTrack:
         # Objective
         self.omega = 2
         self.amplitude = 1
-        self.offset = 0.5
+        self.offset = 0.8
         np.random.seed(self.parser.seed)
 
         print("\n=======Loading Environment=======")
@@ -230,7 +231,7 @@ class BBTrack:
         parser.add_argument("--realtime", action="store_true", help="Match simulation speed to real time")
         parser.add_argument("--seed", type=int, default=42, help="Random seed")
         parser.add_argument("--headless", action="store_true", help="Run in headless mode (no rendering)")
-        parser.add_argument("--weights_dir", type=str, default="/home/lau/sim/DynaTraj/logs/run-20250922_190038-msikxorg", help="absolute path to the weights directory")
+        parser.add_argument("--weights_dir", type=str, default="/home/lau/sim/DynaTraj/logs/run-20250923_060650-u2cujbdh", help="absolute path to the weights directory")
 
         return parser.parse_args()
 
@@ -287,7 +288,8 @@
         # print("target_z:", target_z[0, 0])
 
         # Tracking cost - quadratic penalty for deviation from target
-        pos_cost = (ball_pos - target_z) ** 2 + (board_pos - target_z) ** 2 * 100
+        # pos_cost = (ball_pos - target_z) ** 2
+        pos_cost = (board_pos - target_z) ** 2 * 100  # TODO
 
 
         # Control cost - penalize large board velocities (action is 6D: vx,vy,vz,wx,wy,wz)
@@ -322,7 +324,8 @@
         else:
             obs_tensor = obs
 
-        print("GT:zt:", obs_tensor[0, 0])
+        # print("GT:zt:", obs_tensor[0, 0])
+        print("Board GT:zt:", obs_tensor[0, 2])  # TODO
         target_z = torch.abs(torch.sin(self.omega * torch.tensor(self.current_simulation_time, device=self.device))) * self.amplitude + self.offset  # [horizon]
         # print("target_z:", target_z)
 
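The substantive change is in the MPPI tracking objective of BBTrack: the cost now penalizes only the board height against the rectified-sine reference (scaled by 100), with the ball term commented out, which is what the commit message's "stabilize the board height" refers to. A minimal standalone sketch of that cost, reusing omega, amplitude, and offset from the diff (the tracking_cost wrapper and the tensor shapes are our assumptions, not part of the repo):

# sketch: the board-height tracking cost after this commit
# (tracking_cost is a hypothetical illustration; omega, amplitude, offset,
#  and the board_pos/target_z names follow the diff)
import torch

def tracking_cost(board_pos: torch.Tensor, t: float,
                  omega: float = 2.0, amplitude: float = 1.0,
                  offset: float = 0.8) -> torch.Tensor:
    # Rectified-sine height reference, matching the diff's target_z expression.
    target_z = torch.abs(torch.sin(omega * torch.tensor(t))) * amplitude + offset
    # Quadratic penalty on board height only, scaled by 100.
    return (board_pos - target_z) ** 2 * 100

cost = tracking_cost(torch.full((4096, 1), 0.8), t=0.5)
print(cost.shape)  # torch.Size([4096, 1])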