chrisjcc committed on
Commit
bf2e8ae
·
verified ·
1 Parent(s): 7caf98f

Remove superfluous code

Browse files
Files changed (1) hide show
  1. tray_sim.py +3 -210
tray_sim.py CHANGED
@@ -25,164 +25,6 @@ def quat_to_rotmat(q):
25
  [2*x*z - 2*y*w, 2*y*z + 2*x*w, 1 - 2*x**2 - 2*y**2]
26
  ])
27
 
28
def classify_orientation_stability(data, model, alpha_deg=45):
    """Classify each object as stable or unstable from its tilt angle.

    An object counts as stable when its local z-axis deviates from the
    world "up" direction by less than ``alpha_deg`` degrees.

    Args:
        data: MuJoCo data object; free-joint layout, 7 qpos slots per object.
        model: MuJoCo model (unused; kept for a uniform classifier signature).
        alpha_deg: Maximum allowed tilt angle, in degrees.

    Returns:
        List of booleans, True for every object whose tilt is within the limit.
    """
    tilt_limit_cos = np.cos(np.deg2rad(alpha_deg))
    gravity_dir = np.array([0, 0, -1])

    def _is_upright(obj_idx):
        # Quaternion occupies qpos slots 3..6 of the object's 7-DOF block.
        quat = data.qpos[obj_idx * 7 + 3 : obj_idx * 7 + 7]
        world_z = quat_to_rotmat(quat)[:, 2]  # local z-axis in world coords
        # Negated dot because gravity points down; result is cos(tilt angle).
        return -np.dot(gravity_dir, world_z) > tilt_limit_cos

    return [_is_upright(i) for i in range(N_OBJECTS)]
46
-
47
def classify_orientation_stability_verbose(data, model, alpha_deg=45):
    """Classify orientation stability and explain unstable verdicts.

    Same criterion as classify_orientation_stability (tilt of the object's
    local z-axis vs. world up, limit ``alpha_deg``), but each result carries
    a human-readable reason suitable for LLM prompting.

    Args:
        data: MuJoCo data object; free-joint layout, 7 qpos slots per object.
        model: MuJoCo model (unused; kept for a uniform classifier signature).
        alpha_deg: Maximum allowed tilt angle, in degrees.

    Returns:
        List of dicts with keys "stable" (bool) and "reason" (str).
    """
    results = []
    cos_alpha = np.cos(np.deg2rad(alpha_deg))
    g = np.array([0, 0, -1])

    for i in range(N_OBJECTS):
        q = data.qpos[i*7 + 3 : i*7 + 7]
        rotmat = quat_to_rotmat(q)
        obj_z_world = rotmat[:, 2]  # local z-axis in world coords
        # Clip guards arccos against |cos_theta| drifting past 1 due to
        # floating-point error in the quaternion -> rotation conversion,
        # which would otherwise report a NaN tilt angle.
        cos_theta = np.clip(-np.dot(g, obj_z_world), -1.0, 1.0)

        if cos_theta > cos_alpha:
            results.append({"stable": True, "reason": "upright (tilt within limit)"})
        else:
            angle_deg = np.rad2deg(np.arccos(cos_theta))
            results.append({
                "stable": False,
                "reason": f"tilt angle {angle_deg:.2f}° exceeds {alpha_deg}°"
            })

    return results
68
-
69
-
70
def classify_stability(data, model):
    """Classify object stability from position alone.

    An object is considered stable when it is still resting inside the
    tray area and has not dropped to the floor:
      * z-position strictly above 0.01
      * |x| and |y| strictly inside the tray half-width of 0.3

    Args:
        data: MuJoCo data object containing the simulation state
              (free-joint layout, 7 qpos slots per object).
        model: MuJoCo model (unused; passed alongside data by convention).

    Returns:
        List of booleans, one per object, True when the object is stable.
    """
    tray_half_width = 0.3
    min_height = 0.01
    verdicts = []
    for idx in range(N_OBJECTS):
        x, y, z = data.qpos[idx * 7 : idx * 7 + 3]
        inside_tray = abs(x) < tray_half_width and abs(y) < tray_half_width
        verdicts.append(bool(inside_tray and z > min_height))
    return verdicts
98
-
99
- # Extend the classifier to explain why something is unstable:
100
# Extend the classifier to explain why something is unstable:
def classify_stability_verbose(data, model):
    """Classify position-based stability and report failure reasons.

    Same thresholds as classify_stability (tray half-width 0.3, minimum
    height 0.01), but each verdict carries a human-readable reason.

    Args:
        data: MuJoCo data object (free-joint layout, 7 qpos slots per object).
        model: MuJoCo model (unused; passed alongside data by convention).

    Returns:
        List of dicts with keys "stable" (bool) and "reason" (str).
    """
    flags = []
    tray_bounds = 0.3
    for i in range(N_OBJECTS):
        pos = data.qpos[i*7 : i*7 + 3]
        reasons = []
        # Boundary comparisons use <= / >= so that an object sitting exactly
        # on a threshold gets the same verdict as classify_stability, which
        # requires strict z > 0.01 and strict |x|, |y| < tray_bounds.
        if pos[2] <= 0.01:
            reasons.append("z too low (possibly toppled)")
        if abs(pos[0]) >= tray_bounds or abs(pos[1]) >= tray_bounds:
            reasons.append("outside tray bounds")
        if not reasons:
            flags.append({"stable": True, "reason": "object upright and within tray"})
        else:
            flags.append({"stable": False, "reason": ", ".join(reasons)})
    return flags
115
-
116
- # Create LLM-Friendly Prompt Template
117
- def format_llm_prompt(physics_state):
118
- """
119
- Converts a physics state dictionary of a simulated scene into a JSON-serializable list of objects
120
- (human-readable), where each object contains position, orientation, velocity, angular velocity,
121
- and stability information suitable for prompting a large language model (LLM).
122
-
123
- Each object is described by its:
124
- - 3D position
125
- - orientation in ZYX Euler angles (in degrees)
126
- - linear velocity
127
- - angular velocity
128
- - stability status (Stable or Unstable)
129
-
130
- Parameters:
131
- physics_state (dict): A dictionary containing the physics properties of each object,
132
- with the following keys:
133
- - "positions_xyz": List of shape (N, 3), each entry a position vector [x, y, z]
134
- - "orientations_euler": List of shape (N, 3), each entry an orientation vector [z, y, x] in degrees
135
- - "velocities_linear": List of shape (N, 3), linear velocity vectors
136
- - "velocities_angular": List of shape (N, 3), angular velocity vectors
137
- - "stable_flags": List of bools indicating whether each object is stable
138
-
139
- Returns:
140
- List[dict]: Each dictionary represents an object with formatted fields
141
- suitable for JSON serialization.
142
- """
143
- formatted = []
144
- for i in range(len(physics_state["positions_xyz"])):
145
- obj = {
146
- "id": i,
147
- "position": np.round(physics_state["positions_xyz"][i], 3).tolist(),
148
- "orientation_euler_zyx_deg": np.round(physics_state["orientations_euler"][i], 1).tolist(),
149
- "linear_velocity": np.round(physics_state["velocities_linear"][i], 3).tolist(),
150
- "angular_velocity": np.round(physics_state["velocities_angular"][i], 3).tolist(),
151
- "status": "Stable" if physics_state["stable_flags"][i] else "Unstable"
152
- #"status": "Stable" if verbose["stable"] else f"Unstable ({verbose['reason']})"
153
- }
154
- formatted.append(obj)
155
- return formatted
156
-
157
def format_as_natural_language_prompt(scene_description, task_description=None):
    """Render a structured scene description as a natural-language LLM prompt.

    Args:
        scene_description (List[dict]): Output of format_llm_prompt().
        task_description (str, optional): Instruction prepended to the prompt,
            e.g. "Describe which blocks are unstable and why" or
            "Predict which objects might move further if the tray tilts".

    Returns:
        str: Newline-joined natural-language prompt.
    """
    sections = []
    if task_description:
        sections.append(task_description.strip())
        sections.append("")  # blank line separates the task from the summary

    sections.append("Here is the scene summary:")
    sections.extend(
        f"Object {obj['id']} is at position {obj['position']} with orientation "
        f"{obj['orientation_euler_zyx_deg']} degrees. "
        f"It has linear velocity {obj['linear_velocity']} and angular velocity "
        f"{obj['angular_velocity']}. "
        f"Status: {obj['status']}."
        for obj in scene_description
    )
    return "\n".join(sections)
186
 
187
  def run_tray_simulation(
188
  seed=0,
@@ -226,30 +68,12 @@ def run_tray_simulation(
226
  direction /= np.linalg.norm(direction)
227
  push_velocity = 0.15 * direction # Adjust magnitude
228
 
229
- # Object initialization
230
- for i in range(num_objects):
231
- obj_start = i * 7 # 7 DOF per free joint body
232
- pos = np.random.uniform(low=[-0.1, -0.1, 0.05], high=[0.1, 0.1, 0.15])
233
- quat = [1, 0, 0, 0] # no rotation
234
- data.qpos[obj_start:obj_start+3] = pos
235
- data.qpos[obj_start+3:obj_start+7] = quat
236
- data.qvel[i*6:i*6+6] = 0
237
- print(f"Object {i} initial position: {pos}") # Debug print
238
-
239
  # Renderer setup
240
  renderer = mujoco.Renderer(model, width=480, height=480) # width=640, height=640
241
 
242
  # Custom camera parameters
243
  scene_option = mujoco.MjvOption()
244
 
245
- # Enable contact visualization in the renderer to debug collisions
246
- # Check if contact points appear when the puck hits the tray. If not,
247
- # collisions are not being detected.
248
- #scene_option.flags[mujoco.mjtVisFlag.mjVIS_CONTACTPOINT] = True
249
- #scene_option.flags[mujoco.mjtVisFlag.mjVIS_CONTACTFORCE] = True
250
- #scene_option.flags[mujoco.mjtVisFlag.mjVIS_JOINT] = True # if you want to see joint frames or anchors to understand the articulated structure.
251
- #scene_option.flags[mujoco.mjtVisFlag.mjVIS_TRANSPARENT] = True # if the model uses transparent geoms and you want them shown correctly.
252
-
253
  cam = mujoco.MjvCamera()
254
  cam.type = mujoco.mjtCamera.mjCAMERA_FREE
255
  cam.lookat[:] = np.array([0.0, 0.0, 0.02]) # Center of the tray
@@ -263,20 +87,8 @@ def run_tray_simulation(
263
  for t in range(SIM_STEPS):
264
  if t == PUSH_START_STEP:
265
  # Activate pusher
266
- #data.qvel[pusher_vel_idx:pusher_vel_idx+3] = [-0.05, 0.05, 0.0] # toward center
267
  data.qvel[pusher_vel_idx:pusher_vel_idx+3] = push_velocity # toward center
268
 
269
- #if t == IMPACT_STEP and not tray_force_applied:
270
- # mujoco.mj_applyFT(
271
- # model,
272
- # data,
273
- # np.array([5.0, 0.0, 0.0]), # force (e.g. impulse to the right; adjust magnitude)
274
- # np.array([0.0, 0.0, 0.0]), # torque
275
- # np.array([0.0, 0.0, 0.0]), # point in global frame (apply at body origin)
276
- # tray_id, # body ID
277
- # data.qfrc_applied # force accumulator to apply into
278
- # )
279
- # tray_force_applied = True
280
 
281
  mujoco.mj_step(model, data)
282
 
@@ -288,17 +100,11 @@ def run_tray_simulation(
288
  img = renderer.render()
289
 
290
  if t in keyframes:
291
- #imageio.imwrite(f"/tmp/frame_{t}.png", img)
292
  imageio.imwrite(f"/tmp/frame_{t}_{unique_id}.png", img)
293
 
294
  frames.append(img)
295
 
296
  renderer.close()
297
-
298
- stability_flags = classify_stability(data, model)
299
-
300
- # Optional: print or return
301
- print("Stability:", stability_flags)
302
 
303
  euler_angles = []
304
  for i in range(num_objects):
@@ -310,35 +116,22 @@ def run_tray_simulation(
310
 
311
  # Fully structured snapshot of: Positions,
312
  # Orientations (in both quaternion and Euler angle forms),
313
- # Linear and angular velocities, and Stability flags
314
  physics_state = {
315
  "positions_xyz": data.qpos[:num_objects * 7].reshape(num_objects, 7)[:, :3].tolist(), # [x,y,z,qw,qx,qy,qz]
316
  "orientations_quat": data.qpos[:num_objects * 7].reshape(num_objects, 7)[:, 3:].tolist(),
317
  "orientations_euler": euler_angles,
318
  "velocities_linear": data.qvel[:num_objects * 6].reshape(num_objects, 6)[:, :3].tolist(), # [vx,vy,vz,wx,wy,wz]
319
  "velocities_angular": data.qvel[:num_objects * 6].reshape(num_objects, 6)[:, 3:].tolist(),
320
- "stable_flags": stability_flags,
321
- #"stable_verbose": classify_stability_verbose(data, model), more interpretable to LLMs
322
  }
323
 
324
- # Save JSON Snapshot (optional, for LLM input/debugging)
325
- #json_path = os.path.join(tempfile.gettempdir(), f"tray_sim_{seed}_state.json")
326
- # Save JSON with unique ID
327
  json_path = os.path.join(tempfile.gettempdir(), f"tray_sim_{unique_id}_state.json")
328
  with open(json_path, "w") as f:
329
  json.dump(physics_state, f, indent=2)
330
 
331
- # LLM-friendly prompt output
332
- formatted = format_llm_prompt(physics_state)
333
- prompt = format_as_natural_language_prompt(
334
- formatted,
335
- task_description="Explain which objects are likely to fall if the tray is tilted slightly to the right."
336
- )
337
-
338
- # Save to GIF
339
- #gif_path = os.path.join(tempfile.gettempdir(), f"tray_sim_{seed}.gif")
340
  # Save GIF with unique ID
341
  gif_path = os.path.join(tempfile.gettempdir(), f"tray_sim_{unique_id}.gif")
342
  imageio.mimsave(gif_path, frames, fps=20)
343
 
344
- return gif_path, stability_flags, physics_state, prompt, json_path # optionally also return json_path
 
25
  [2*x*z - 2*y*w, 2*y*z + 2*x*w, 1 - 2*x**2 - 2*y**2]
26
  ])
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
  def run_tray_simulation(
30
  seed=0,
 
68
  direction /= np.linalg.norm(direction)
69
  push_velocity = 0.15 * direction # Adjust magnitude
70
 
 
 
 
 
 
 
 
 
 
 
71
  # Renderer setup
72
  renderer = mujoco.Renderer(model, width=480, height=480) # width=640, height=640
73
 
74
  # Custom camera parameters
75
  scene_option = mujoco.MjvOption()
76
 
 
 
 
 
 
 
 
 
77
  cam = mujoco.MjvCamera()
78
  cam.type = mujoco.mjtCamera.mjCAMERA_FREE
79
  cam.lookat[:] = np.array([0.0, 0.0, 0.02]) # Center of the tray
 
87
  for t in range(SIM_STEPS):
88
  if t == PUSH_START_STEP:
89
  # Activate pusher
 
90
  data.qvel[pusher_vel_idx:pusher_vel_idx+3] = push_velocity # toward center
91
 
 
 
 
 
 
 
 
 
 
 
 
92
 
93
  mujoco.mj_step(model, data)
94
 
 
100
  img = renderer.render()
101
 
102
  if t in keyframes:
 
103
  imageio.imwrite(f"/tmp/frame_{t}_{unique_id}.png", img)
104
 
105
  frames.append(img)
106
 
107
  renderer.close()
 
 
 
 
 
108
 
109
  euler_angles = []
110
  for i in range(num_objects):
 
116
 
117
  # Fully structured snapshot of: Positions,
118
  # Orientations (in both quaternion and Euler angle forms),
119
+ # Linear and angular velocities
120
  physics_state = {
121
  "positions_xyz": data.qpos[:num_objects * 7].reshape(num_objects, 7)[:, :3].tolist(), # [x,y,z,qw,qx,qy,qz]
122
  "orientations_quat": data.qpos[:num_objects * 7].reshape(num_objects, 7)[:, 3:].tolist(),
123
  "orientations_euler": euler_angles,
124
  "velocities_linear": data.qvel[:num_objects * 6].reshape(num_objects, 6)[:, :3].tolist(), # [vx,vy,vz,wx,wy,wz]
125
  "velocities_angular": data.qvel[:num_objects * 6].reshape(num_objects, 6)[:, 3:].tolist(),
 
 
126
  }
127
 
128
+ # Save JSON Snapshot with unique ID (optional, for LLM input/debugging)
 
 
129
  json_path = os.path.join(tempfile.gettempdir(), f"tray_sim_{unique_id}_state.json")
130
  with open(json_path, "w") as f:
131
  json.dump(physics_state, f, indent=2)
132
 
 
 
 
 
 
 
 
 
 
133
  # Save GIF with unique ID
134
  gif_path = os.path.join(tempfile.gettempdir(), f"tray_sim_{unique_id}.gif")
135
  imageio.mimsave(gif_path, frames, fps=20)
136
 
137
+ return gif_path, json_path # optionally also return json_path