diff --git a/README_dmcontrol_collect.md b/README_dmcontrol_collect.md new file mode 100644 index 0000000000000000000000000000000000000000..6b2a4f3c86d8ff20b3a0f57f4e95832a87a10536 --- /dev/null +++ b/README_dmcontrol_collect.md @@ -0,0 +1,87 @@ +## dm_control data collection (dmcontrol_collect.py) + +### Overview +This script collects trajectories from DeepMind Control (dm_control) environments using uniformly sampled torque actions in [-1, 1]. Data are saved with `TrajectoryBuffer` as compressed `.npz` along with a metadata `.pkl`. + +Collected state per step contains (in order): +- joint angles (radians) +- joint angular velocities (rad/s) +- root position (x, y, z) +- root linear velocity (vx, vy, vz) +- root rotation quaternion (qx, qy, qz, qw) +- root angular velocity (wx, wy, wz) +- last applied torque (action vector) + +### Requirements +- Python 3.9+ +- dm_control and MuJoCo installed: + ```bash + pip install dm-control mujoco + ``` + +### Hyperparameters (CLI) + +| Name | Type / Default | Description | +|------|-----------------|-------------| +| `--domain` | str, default `quadruped` | dm_control domain name, e.g. `quadruped`, `cheetah`. | +| `--task` | str, default `walk` | dm_control task name, e.g. `walk`, `run`. | +| `--seed` | int, default `0` | PRNG seed used for env and action sampling. | +| `--trajectories_per_file` | int, default `512` | Number of trajectories to collect and save in one output file. | +| `--steps_per_trajectory` | int, default `48` | Number of steps per trajectory segment saved to the dataset. | +| `--out_dir` | str, default `/home/lau/sim/DynaTraj/dataset` | Directory to store output `.npz` and metadata `.pkl`. | +| `--render` | flag (bool), default `False` | If set, render frames during collection (tries OpenCV, then matplotlib). | + +Notes: +- Actions are sampled i.i.d. uniformly from [-1, 1] each step and treated as torques. +- If the model uses a free base, the root quaternion is output as `(x, y, z, w)`. 
+ +### Output format +- Dataset file: `dmcontrol_{domain}_{task}_seed{seed}_{timestamp}.npz` +- Metadata file: `dmcontrol_{domain}_{task}_seed{seed}_{timestamp}_metadata.pkl` + +`npz` keys (all stored by `TrajectoryBuffer`): +- `obs`: shape `[N, B, T, D_obs]` +- `ext_obs`: shape `[N, B, T, D_obs]` (same content as `obs` in this script) +- `action`: shape `[N, B, T, D_act]` +- `reward`: shape `[N, B, T]` +- `done`: shape `[N, B, T]` + +Where: +- `N` = number of trajectory segments (equals `trajectories_per_file` for `B=1`) +- `B` = batch size (this script uses `B=1`) +- `T` = `steps_per_trajectory` +- `D_obs` = state dimension described above +- `D_act` = action dimension from the environment action spec + +The metadata `.pkl` contains: domain, task, seed, counts, action bounds, timestamp, and `render` flag. + +### Examples +- Quadruped walk (default): + ```bash + python /home/lau/sim/DynaTraj/dmcontrol_collect.py + ``` +- Cheetah run (planar cheetah): + ```bash + python /home/lau/sim/DynaTraj/dmcontrol_collect.py --domain cheetah --task run --seed 1 --trajectories_per_file 512 --steps_per_trajectory 48 --out_dir /home/lau/sim/DynaTraj/dataset + ``` +- With rendering (requires OpenCV or matplotlib): + ```bash + python /home/lau/sim/DynaTraj/dmcontrol_collect.py --domain quadruped --task walk --render + ``` + +### Tips +- Rendering slows down collection; disable `--render` when collecting large datasets. +- If a task terminates early, the script resets automatically and continues until it reaches the requested number of trajectories. +- Ensure MuJoCo is set up properly in your environment if dm_control fails to import. 
class TrajectoryBuffer:
    """Accumulate batched environment steps into fixed-length trajectory segments.

    Every ``traj_steps`` appended steps, the per-step buffers are stacked along a
    new time axis into a segment of shape ``[B, traj_steps, ...]`` and moved to
    the trajectory pool. ``finalize()`` stacks all completed segments into arrays
    of shape ``[N, B, traj_steps, ...]``.

    Note: steps belonging to a partially-filled segment are silently discarded
    by ``finalize()`` — only complete segments are kept.
    """

    def __init__(self, traj_steps: int):
        # Number of steps per stored trajectory segment (the T dimension).
        self.traj_steps = traj_steps

        # Total number of steps appended so far (across all segments).
        self.step_idx = 0
        # Per-key lists of per-step [B, ...] arrays for the segment being filled.
        self.buffers = defaultdict(list)
        # Per-key lists of completed [B, T, ...] segments.
        self.traj_pool = defaultdict(list)
        # Batch size B, inferred from the first appended observation.
        self.batch_size = None

    def append_step(self, obs, ext_obs, action, reward, done):
        """Append one environment step for all B parallel environments.

        Args:
            obs:     [B, ...] observation batch.
            ext_obs: [B, ...] extended observation batch (stored alongside obs).
            action:  [B, ...] action batch.
            reward:  [B] reward batch.
            done:    [B] episode-termination flags.

        Raises:
            ValueError: if the obs batch size differs from the first appended
                step — previously this was silently accepted and only crashed
                later inside ``np.stack`` with a confusing shape error.
        """
        if self.batch_size is None:
            self.batch_size = obs.shape[0]
        elif obs.shape[0] != self.batch_size:
            raise ValueError(
                f"Inconsistent batch size: expected {self.batch_size}, got {obs.shape[0]}"
            )
        # Copy so stored arrays cannot alias mutable simulator buffers.
        self.buffers["obs"].append(obs.copy())
        self.buffers["action"].append(action.copy())
        self.buffers["reward"].append(reward.copy())
        self.buffers["done"].append(done.copy())
        self.buffers["ext_obs"].append(ext_obs.copy())

        self.step_idx += 1

        # A full segment is ready: stack per-step arrays -> [B, T, ...] and
        # move the segment into the pool, clearing the working buffers.
        if self.step_idx % self.traj_steps == 0:
            for key, steps in self.buffers.items():
                self.traj_pool[key].append(np.stack(steps, axis=1))
                steps.clear()

    def finalize(self):
        """Return ``{key: [N, B, T, ...]}`` arrays of all completed segments."""
        return {k: np.stack(v, axis=0) for k, v in self.traj_pool.items()}

    def save(self, path):
        """Save all completed segments to a compressed ``.npz`` file at *path*."""
        np.savez_compressed(path, **self.finalize())

    def __len__(self):
        """Number of complete trajectories collected so far (segments * B)."""
        if not self.traj_pool or self.batch_size is None:
            return 0
        flushes = len(next(iter(self.traj_pool.values())))
        return flushes * self.batch_size
b/dataset/sb3_cheetah_run_ckpt020_2025-08-08_01-33-31.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:233ceb0c27ed9e88d10b98123170d2da3e19044be8d11bcd3b17df54e3a730a2 +size 25215285 diff --git a/dataset/sb3_cheetah_run_ckpt020_2025-08-08_01-33-31_metadata.pkl b/dataset/sb3_cheetah_run_ckpt020_2025-08-08_01-33-31_metadata.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d695f6fff1ad08e46f610dd9ae8136cebfa3c202 Binary files /dev/null and b/dataset/sb3_cheetah_run_ckpt020_2025-08-08_01-33-31_metadata.pkl differ diff --git a/dataset/sb3_cheetah_run_ckpt030_2025-08-08_01-34-10.npz b/dataset/sb3_cheetah_run_ckpt030_2025-08-08_01-34-10.npz new file mode 100644 index 0000000000000000000000000000000000000000..a169eb43d017513d0bd49c06c06bf38aa889d869 --- /dev/null +++ b/dataset/sb3_cheetah_run_ckpt030_2025-08-08_01-34-10.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce9a474990a0f216bf61b172523bea7f918c66fc98721c393e4284a8632185d5 +size 25393126 diff --git a/dataset/sb3_cheetah_run_ckpt030_2025-08-08_01-34-10_metadata.pkl b/dataset/sb3_cheetah_run_ckpt030_2025-08-08_01-34-10_metadata.pkl new file mode 100644 index 0000000000000000000000000000000000000000..fe4aaf6f8bbd99f953b10146c1b6443603b6e8b0 Binary files /dev/null and b/dataset/sb3_cheetah_run_ckpt030_2025-08-08_01-34-10_metadata.pkl differ diff --git a/dataset/sb3_cheetah_run_ckpt040_2025-08-08_01-34-50.npz b/dataset/sb3_cheetah_run_ckpt040_2025-08-08_01-34-50.npz new file mode 100644 index 0000000000000000000000000000000000000000..058ed77d9da6e96caae152f449af7cb115b9cf37 --- /dev/null +++ b/dataset/sb3_cheetah_run_ckpt040_2025-08-08_01-34-50.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f58d03f3fb03b927d28d9839c6cfe1fc16a216456dc6ab5df7f5743e66a9250 +size 25368383 diff --git a/dataset/sb3_cheetah_run_ckpt040_2025-08-08_01-34-50_metadata.pkl b/dataset/sb3_cheetah_run_ckpt040_2025-08-08_01-34-50_metadata.pkl 
new file mode 100644 index 0000000000000000000000000000000000000000..94053806b189f50cf824c079163b4dfdecbf27f4 Binary files /dev/null and b/dataset/sb3_cheetah_run_ckpt040_2025-08-08_01-34-50_metadata.pkl differ diff --git a/dataset/sb3_cheetah_run_ckpt050_2025-08-08_01-35-40.npz b/dataset/sb3_cheetah_run_ckpt050_2025-08-08_01-35-40.npz new file mode 100644 index 0000000000000000000000000000000000000000..146f939cd2e909ff3ebcf8b902f329b6a1bedc62 --- /dev/null +++ b/dataset/sb3_cheetah_run_ckpt050_2025-08-08_01-35-40.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1dc13467e61a171ce0aacf16b01a7275ef6f7c57794fc99c5092f0db313bba52 +size 25363130 diff --git a/dataset/sb3_cheetah_run_ckpt050_2025-08-08_01-35-40_metadata.pkl b/dataset/sb3_cheetah_run_ckpt050_2025-08-08_01-35-40_metadata.pkl new file mode 100644 index 0000000000000000000000000000000000000000..6cef06c029212108726c6b2262b8921ffc02facc Binary files /dev/null and b/dataset/sb3_cheetah_run_ckpt050_2025-08-08_01-35-40_metadata.pkl differ diff --git a/dmcontrol_collect.py b/dmcontrol_collect.py new file mode 100644 index 0000000000000000000000000000000000000000..b44d3cdd33b484e9dee9003cfaac8ac4a09451ad --- /dev/null +++ b/dmcontrol_collect.py @@ -0,0 +1,294 @@ +import argparse +import os +import time +from datetime import datetime + +import numpy as np +from tqdm import tqdm + +from dataset import TrajectoryBuffer + +# dm_control imports +try: + from dm_control import suite +except Exception as e: + raise RuntimeError( + "dm_control is required. 
Install via: pip install dm-control mujoco" + ) from e + + +class _RenderHelper: + def __init__(self): + self.backend = None + self._warned = False + self._cv2 = None + self._plt = None + self._fig = None + self._ax = None + self._im = None + try: + import cv2 # type: ignore + + self._cv2 = cv2 + self.backend = "cv2" + except Exception: + try: + import matplotlib.pyplot as plt # type: ignore + + self._plt = plt + self.backend = "mpl" + self._fig, self._ax = plt.subplots() + self._im = None + plt.ion() + except Exception: + self.backend = None + + def show(self, rgb: np.ndarray): + if self.backend == "cv2" and self._cv2 is not None: + bgr = rgb[..., ::-1] + self._cv2.imshow("dmcontrol", bgr) + self._cv2.waitKey(1) + elif self.backend == "mpl" and self._plt is not None: + if self._im is None: + self._im = self._ax.imshow(rgb) + self._ax.axis("off") + else: + self._im.set_data(rgb) + self._plt.pause(0.001) + else: + if not self._warned: + print("[WARN] Rendering requested but no display backend found (cv2/matplotlib). Skipping render.") + self._warned = True + + def close(self): + if self.backend == "cv2" and self._cv2 is not None: + self._cv2.destroyAllWindows() + elif self.backend == "mpl" and self._plt is not None and self._fig is not None: + self._plt.close(self._fig) + + +def build_state_from_physics(physics: "suite.Environment.physics", last_action: np.ndarray) -> np.ndarray: + """ + Build the state vector from MuJoCo physics and the last applied action (torque). 
def build_state_from_physics(physics: "suite.Environment.physics", last_action: np.ndarray) -> np.ndarray:
    """Assemble the dataset state vector from MuJoCo physics and the last action.

    Layout, in order:
    - joint angles (radians)
    - joint angular velocities (rad/s)
    - root position (x, y, z)
    - root linear velocity (vx, vy, vz)
    - root rotation quaternion reordered to (qx, qy, qz, qw)
    - root angular velocity (wx, wy, wz)
    - last applied torque (the action vector)

    Models without a free-floating base get a zeroed root state and an
    identity quaternion.
    """
    # Copy so the returned state does not alias MuJoCo's internal buffers.
    generalized_pos = np.array(physics.data.qpos, dtype=np.float32).copy()
    generalized_vel = np.array(physics.data.qvel, dtype=np.float32).copy()

    # A free base occupies qpos[0:7] = [x, y, z, qw, qx, qy, qz] and
    # qvel[0:6] = [vx, vy, vz, wx, wy, wz] (typical 3D locomotion models).
    has_free_base = generalized_pos.shape[0] >= 7 and generalized_vel.shape[0] >= 6
    if has_free_base:
        base_pos = generalized_pos[0:3]
        quat_w, quat_x, quat_y, quat_z = generalized_pos[3:7]
        # MuJoCo stores quaternions (w, x, y, z); the dataset wants (x, y, z, w).
        base_quat_xyzw = np.array([quat_x, quat_y, quat_z, quat_w], dtype=np.float32)
        base_lin_vel = generalized_vel[0:3]
        base_ang_vel = generalized_vel[3:6]
        joint_pos = generalized_pos[7:]
        joint_vel = generalized_vel[6:]
    else:
        # Planar / fixed-base model: no 3D root state is available.
        base_pos = np.zeros(3, dtype=np.float32)
        base_quat_xyzw = np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32)
        base_lin_vel = np.zeros(3, dtype=np.float32)
        base_ang_vel = np.zeros(3, dtype=np.float32)
        joint_pos = generalized_pos.astype(np.float32)
        joint_vel = generalized_vel.astype(np.float32)

    return np.concatenate(
        (
            joint_pos.astype(np.float32),
            joint_vel.astype(np.float32),
            base_pos.astype(np.float32),
            base_lin_vel.astype(np.float32),
            base_quat_xyzw,
            base_ang_vel.astype(np.float32),
            last_action.astype(np.float32),
        ),
        dtype=np.float32,
    )


# Default collection sizes, also used as argparse defaults below.
essential_hparams = dict(
    trajectories_per_file=512,
    steps_per_trajectory=48,
)
def collect_dmcontrol(
    domain: str,
    task: str,
    seed: int,
    trajectories_per_file: int,
    steps_per_trajectory: int,
    out_dir: str,
    render: bool = False,
):
    """Collect random-action trajectories from a dm_control task and save them.

    Actions are sampled i.i.d. uniformly from [-1, 1] each step. The state
    stored at step t is built from physics *before* applying the t-th action,
    paired with the previously applied action. Results are written as a
    compressed .npz (via TrajectoryBuffer) plus a metadata .pkl in out_dir.
    """
    rng = np.random.RandomState(seed)

    # Load environment
    env = suite.load(
        domain_name=domain,
        task_name=task,
        task_kwargs={"random": seed},
        environment_kwargs={"flat_observation": False},
    )

    action_spec = env.action_spec()
    if action_spec.minimum is None or action_spec.maximum is None:
        # Default to [-1, 1] if not specified (should be present in DMC)
        action_low = -np.ones(action_spec.shape, dtype=np.float32)
        action_high = np.ones(action_spec.shape, dtype=np.float32)
    else:
        action_low = np.asarray(action_spec.minimum, dtype=np.float32)
        action_high = np.asarray(action_spec.maximum, dtype=np.float32)

    # Prepare output directory
    os.makedirs(out_dir, exist_ok=True)

    # Create buffer
    buffer = TrajectoryBuffer(steps_per_trajectory)

    # Optional renderer
    viewer = _RenderHelper() if render else None

    # Reset env
    ts = env.reset()
    # Zero "previous action" for the very first stored step.
    prev_action = np.zeros(action_spec.shape, dtype=np.float32)

    # Progress
    pbar = tqdm(total=trajectories_per_file, desc=f"Collecting {domain}/{task}")

    # Main loop until we fill the required number of trajectories
    while len(buffer) < trajectories_per_file:
        # Build current state from physics and last applied torque
        state = build_state_from_physics(env.physics, prev_action)

        # Reward / done from current timestep
        reward = 0.0 if ts.reward is None else float(ts.reward)
        done = bool(ts.last())

        # Prepare batch dimension B=1
        obs_np = state[None, :]
        ext_obs_np = obs_np  # store the same as ext_obs for convenience
        action_np = prev_action[None, :]
        reward_np = np.array([reward], dtype=np.float32)
        done_np = np.array([done], dtype=np.bool_)

        # Append to buffer
        buffer.append_step(obs_np, ext_obs_np, action_np, reward_np, done_np)

        # Sample next action uniformly in [-1, 1].
        # NOTE(review): sampling ignores action_low/action_high computed above —
        # fine for DMC tasks whose bounds are [-1, 1]; confirm for other tasks.
        action = rng.uniform(low=-1, high=1, size=action_spec.shape).astype(np.float32)

        # Step the environment
        ts = env.step(action)

        # Render current frame if requested
        if viewer is not None:
            try:
                frame = env.physics.render(height=480, width=640, camera_id=0)
                viewer.show(frame)
            except Exception as _:
                # Suppress rendering errors to not break collection
                pass

        # Update last action (torque) for next state build
        prev_action = action

        # Handle episode termination: reset and continue collecting; segments
        # may therefore span an episode boundary (the `done` flag marks it).
        if ts.last():
            ts = env.reset()
            prev_action = np.zeros_like(prev_action)

        # Update progress
        pbar.n = len(buffer)
        pbar.refresh()

    pbar.close()

    if viewer is not None:
        viewer.close()

    # Save dataset
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    file_stem = f"dmcontrol_{domain}_{task}_seed{seed}_{timestamp}"
    dataset_path = os.path.join(out_dir, f"{file_stem}.npz")
    buffer.save(dataset_path)

    # Save metadata alongside the dataset
    metadata = {
        "domain": domain,
        "task": task,
        "seed": seed,
        "num_trajectories": len(buffer),
        "steps_per_trajectory": steps_per_trajectory,
        "total_steps": int(len(buffer) * steps_per_trajectory),
        "action_low": action_low.tolist(),
        "action_high": action_high.tolist(),
        "collected_at": timestamp,
        "render": bool(render),
    }
    import pickle

    metadata_path = os.path.join(out_dir, f"{file_stem}_metadata.pkl")
    with open(metadata_path, "wb") as f:
        pickle.dump(metadata, f)

    print(f"[INFO] Saved {len(buffer)} trajectories to {dataset_path}")
    print(f"[INFO] Saved metadata to {metadata_path}")


def parse_args():
    """CLI arguments for random-action dm_control data collection."""
    parser = argparse.ArgumentParser(description="Collect dm_control data with random torque actions")
    parser.add_argument("--domain", type=str, default="quadruped", help="dm_control domain name (e.g., quadruped, cheetah)")
    parser.add_argument("--task", type=str, default="walk", help="dm_control task name (e.g., walk, run)")
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument("--trajectories_per_file", type=int, default=essential_hparams["trajectories_per_file"], help="Number of trajectories to collect per output file")
    parser.add_argument("--steps_per_trajectory", type=int, default=essential_hparams["steps_per_trajectory"], help="Number of steps per trajectory")
    parser.add_argument(
        "--out_dir",
        type=str,
        default=os.path.join("/home/lau/sim/DynaTraj", "dataset"),
        help="Output directory to store datasets",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        help="If set, render frames during collection (requires cv2 or matplotlib)",
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()

    # Basic hyperparameter echo
    print("[INFO] Hyperparameters:")
    print(f"  domain/task: {args.domain}/{args.task}")
    print(f"  seed: {args.seed}")
    print(f"  trajectories_per_file: {args.trajectories_per_file}")
    print(f"  steps_per_trajectory: {args.steps_per_trajectory}")
    print(f"  out_dir: {args.out_dir}")
    print(f"  render: {args.render}")

    start = time.time()
    collect_dmcontrol(
        domain=args.domain,
        task=args.task,
        seed=args.seed,
        trajectories_per_file=args.trajectories_per_file,
        steps_per_trajectory=args.steps_per_trajectory,
        out_dir=args.out_dir,
        render=args.render,
    )
    elapsed = time.time() - start
    print(f"[INFO] Done in {elapsed:.1f}s")
Install via: pip install stable-baselines3" + ) from e + +ALGOS = { + "SAC": SAC, + "PPO": PPO, + "TD3": TD3, +} + + +class _RenderHelper: + def __init__(self): + self.backend = None + self._warned = False + self._cv2 = None + self._plt = None + self._fig = None + self._ax = None + self._im = None + try: + import cv2 # type: ignore + + self._cv2 = cv2 + self.backend = "cv2" + except Exception: + try: + import matplotlib.pyplot as plt # type: ignore + + self._plt = plt + self.backend = "mpl" + self._fig, self._ax = plt.subplots() + self._im = None + plt.ion() + except Exception: + self.backend = None + + def show(self, rgb: np.ndarray): + if self.backend == "cv2" and self._cv2 is not None: + bgr = rgb[..., ::-1] + self._cv2.imshow("sb3_collect", bgr) + self._cv2.waitKey(1) + elif self.backend == "mpl" and self._plt is not None: + if self._im is None: + self._im = self._ax.imshow(rgb) + self._ax.axis("off") + else: + self._im.set_data(rgb) + self._plt.pause(0.001) + else: + if not self._warned: + print("[WARN] Rendering requested but no display backend found (cv2/matplotlib). 
# --------- Helpers ---------

def flatten_env_observation(obs_dict: Dict[str, np.ndarray]) -> Tuple[np.ndarray, List[str]]:
    """Flatten a dm_control observation dict into one float32 vector.

    Keys are flattened in sorted order; that order is returned so subsequent
    timesteps can be flattened identically via flatten_obs_with_keys().
    """
    keys = sorted(obs_dict.keys())
    # Delegate to the single flattening implementation so the two helpers
    # cannot drift apart (previously the ravel/concatenate logic was duplicated).
    return flatten_obs_with_keys(obs_dict, keys), keys


def flatten_obs_with_keys(obs_dict: Dict[str, np.ndarray], keys: List[str]) -> np.ndarray:
    """Flatten obs_dict values in the given key order into one float32 vector."""
    parts = [np.asarray(obs_dict[k], dtype=np.float32).ravel() for k in keys]
    return np.concatenate(parts, axis=0).astype(np.float32)


def build_state_from_physics(physics: "suite.Environment.physics", last_action: np.ndarray) -> np.ndarray:
    """Build the dataset state vector from MuJoCo physics plus the last action.

    Layout: joint angles, joint velocities, root position (x, y, z), root
    linear velocity, root quaternion reordered to (x, y, z, w), root angular
    velocity, and the last applied action. Models without a free-floating base
    get a zeroed root state and an identity quaternion.
    """
    # Copy so the returned state does not alias MuJoCo's internal buffers.
    qpos = np.array(physics.data.qpos, dtype=np.float32).copy()
    qvel = np.array(physics.data.qvel, dtype=np.float32).copy()
    # A free base occupies qpos[0:7] (pos + wxyz quaternion) and qvel[0:6].
    if qpos.shape[0] >= 7 and qvel.shape[0] >= 6:
        root_pos = qpos[0:3]
        qwxyz = qpos[3:7]
        # MuJoCo stores quaternions (w, x, y, z); the dataset wants (x, y, z, w).
        root_quat = np.array([qwxyz[1], qwxyz[2], qwxyz[3], qwxyz[0]], dtype=np.float32)
        root_lin_vel = qvel[0:3]
        root_ang_vel = qvel[3:6]
        joint_angles = qpos[7:]
        joint_vels = qvel[6:]
    else:
        # Planar / fixed-base model: no 3D root state is available.
        root_pos = np.zeros(3, dtype=np.float32)
        root_quat = np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32)
        root_lin_vel = np.zeros(3, dtype=np.float32)
        root_ang_vel = np.zeros(3, dtype=np.float32)
        joint_angles = qpos.astype(np.float32)
        joint_vels = qvel.astype(np.float32)
    state_parts = [
        joint_angles.astype(np.float32),
        joint_vels.astype(np.float32),
        root_pos.astype(np.float32),
        root_lin_vel.astype(np.float32),
        root_quat.astype(np.float32),
        root_ang_vel.astype(np.float32),
        last_action.astype(np.float32),
    ]
    return np.concatenate(state_parts, dtype=np.float32)
str = "cpu"): + # Create a tiny dummy env to instantiate policy with correct spaces + def _make_env(): + env = suite.load(domain_name=domain, task_name=task, task_kwargs={"random": 0}) + # Build observation size from first reset + obs0, obs_keys = flatten_env_observation(env.reset().observation) + action_spec = env.action_spec() + act_low = np.asarray(action_spec.minimum, dtype=np.float32) + act_high = np.asarray(action_spec.maximum, dtype=np.float32) + # Create a dummy Gym space via sb3 internals by wrapping DummyVecEnv + # We will instantiate the model with a lambda that returns an object with the same spaces + import gymnasium as gym + from gymnasium import spaces + + class DummySpaceEnv(gym.Env): + def __init__(self): + self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(obs0.shape[0],), dtype=np.float32) + self.action_space = spaces.Box(low=act_low, high=act_high, shape=action_spec.shape, dtype=np.float32) + def reset(self, *, seed=None, options=None): + return np.zeros_like(obs0), {} + def step(self, action): + return np.zeros_like(obs0), 0.0, True, False, {} + + vec_env = DummyVecEnv([lambda: DummySpaceEnv()]) + return vec_env + + ALGO = ALGOS[algo_name] + vec_env = _make_env() + model = ALGO("MlpPolicy", vec_env, verbose=0, device=device) + model.policy.to(device) + model.policy.eval() + return model + + +def collect_with_checkpoint(env, action_spec, model, target_trajectories: int, steps_per_traj: int, buffer: TrajectoryBuffer, pbar: tqdm, obs_keys: List[str], viewer: _RenderHelper | None): + # Reset env and local counters + ts = env.reset() + prev_action = np.zeros(action_spec.shape, dtype=np.float32) + start_len = len(buffer) + + while (len(buffer) - start_len) < target_trajectories: + # Build state for dataset + state = build_state_from_physics(env.physics, prev_action) + reward = 0.0 if ts.reward is None else float(ts.reward) + done = bool(ts.last()) + + # Append current step (B=1) + obs_np = state[None, :] + ext_obs_np = obs_np + 
def collect_with_checkpoint(env, action_spec, model, target_trajectories: int, steps_per_traj: int, buffer: TrajectoryBuffer, pbar: tqdm, obs_keys: List[str], viewer: _RenderHelper | None):
    """Roll out a loaded SB3 policy and append steps to *buffer*.

    Runs until *target_trajectories* new complete segments have been added
    (relative to the buffer's length at entry). The stored state pairs the
    physics-derived observation with the previously applied action.

    NOTE(review): ``steps_per_traj`` is never referenced in this function —
    the segment length comes from ``buffer.traj_steps``; consider removing
    the parameter or asserting they match.
    """
    # Reset env and local counters
    ts = env.reset()
    prev_action = np.zeros(action_spec.shape, dtype=np.float32)
    start_len = len(buffer)

    while (len(buffer) - start_len) < target_trajectories:
        # Build state for dataset
        state = build_state_from_physics(env.physics, prev_action)
        reward = 0.0 if ts.reward is None else float(ts.reward)
        done = bool(ts.last())

        # Append current step (B=1)
        obs_np = state[None, :]
        ext_obs_np = obs_np
        action_np = prev_action[None, :]
        reward_np = np.array([reward], dtype=np.float32)
        done_np = np.array([done], dtype=np.bool_)
        buffer.append_step(obs_np, ext_obs_np, action_np, reward_np, done_np)

        # Policy action from flattened env observation
        flat_obs = flatten_obs_with_keys(ts.observation, obs_keys)
        action, _ = model.predict(flat_obs, deterministic=True)
        action = np.asarray(action, dtype=np.float32).reshape(action_spec.shape)
        # Clip to env action bounds
        low = np.asarray(action_spec.minimum, dtype=np.float32)
        high = np.asarray(action_spec.maximum, dtype=np.float32)
        action = np.clip(action, low, high)

        # Step env
        ts = env.step(action)
        prev_action = action

        # Render (best effort; failures are ignored so collection continues)
        if viewer is not None:
            try:
                frame = env.physics.render(height=480, width=640, camera_id=0)
                viewer.show(frame)
            except Exception:
                pass

        # Episode boundary: reset and zero the "previous action".
        if ts.last():
            ts = env.reset()
            prev_action = np.zeros_like(prev_action)

        # Update progress bar to reflect number of completed trajectories in buffer
        pbar.n = len(buffer)
        pbar.refresh()
def parse_args():
    """CLI arguments for checkpoint-driven SB3 data collection."""
    parser = argparse.ArgumentParser(description="Collect dm_control dataset using specified SB3 checkpoints, one npz per ckpt")
    parser.add_argument("--domain", type=str, default="cheetah")
    parser.add_argument("--task", type=str, default="run")
    parser.add_argument("--algo", type=str, choices=["SAC", "PPO", "TD3"], default="SAC")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--ckpt_root", type=str, default=os.path.join("/home/lau/sim/DynaTraj", "weights"))
    parser.add_argument("--ckpt_indices", type=str, required=True, help="Comma-separated list of checkpoint indices, e.g., 0,10,30,40,50")
    parser.add_argument("--trajectories_per_ckpt", type=int, default=5120)
    parser.add_argument("--steps_per_trajectory", type=int, default=24)
    parser.add_argument("--out_dir", type=str, default=os.path.join("/home/lau/sim/DynaTraj", "dataset"))
    parser.add_argument("--device", type=str, default="cpu")
    parser.add_argument("--render", action="store_true")
    return parser.parse_args()


def main():
    """For each requested checkpoint: load its policy, collect trajectories,
    and write one .npz dataset plus a metadata .pkl per checkpoint."""
    args = parse_args()

    # Prepare env (shared across all checkpoints)
    env = suite.load(domain_name=args.domain, task_name=args.task, task_kwargs={"random": args.seed})
    action_spec = env.action_spec()

    # Determine obs flatten order once so every checkpoint uses the same layout
    ts0 = env.reset()
    _, obs_keys = flatten_env_observation(ts0.observation)

    # Parse checkpoint indices
    try:
        indices = [int(x.strip()) for x in args.ckpt_indices.split(",") if x.strip() != ""]
    except Exception:
        raise ValueError("Invalid --ckpt_indices. Provide comma-separated integers, e.g., 0,10,30")

    ckpt_dir = os.path.join(args.ckpt_root, args.domain, args.task)
    if not os.path.isdir(ckpt_dir):
        raise FileNotFoundError(f"Checkpoint directory not found: {ckpt_dir}")

    os.makedirs(args.out_dir, exist_ok=True)

    viewer = _RenderHelper() if args.render else None

    for idx in indices:
        ckpt_path = os.path.join(ckpt_dir, f"ckpt-{idx}.pt")
        if not os.path.isfile(ckpt_path):
            raise FileNotFoundError(f"Checkpoint not found: {ckpt_path}")

        # Checkpoints are dicts written by PeriodicCkptCallback (train script).
        payload = torch.load(ckpt_path, map_location=args.device)
        algo_name = payload.get("algo", args.algo)
        if algo_name not in ALGOS:
            raise ValueError(f"Unsupported algo in checkpoint {ckpt_path}: {algo_name}")
        state_dict = payload.get("policy_state_dict", None)
        if state_dict is None:
            raise RuntimeError(f"policy_state_dict not found in {ckpt_path}")

        # Build model and load policy weights
        model = load_sb3_policy_for_inference(algo_name, args.domain, args.task, device=args.device)
        model.policy.load_state_dict(state_dict)
        model.policy.eval()

        # Collect for this ckpt (fresh buffer per checkpoint)
        buffer = TrajectoryBuffer(args.steps_per_trajectory)
        pbar = tqdm(total=args.trajectories_per_ckpt, desc=f"Collecting ckpt-{idx}")
        collect_with_checkpoint(env, action_spec, model, args.trajectories_per_ckpt, args.steps_per_trajectory, buffer, pbar, obs_keys, viewer)
        pbar.close()

        # Save dataset for this ckpt
        timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        file_stem = f"sb3_{args.domain}_{args.task}_ckpt{idx:03d}_{timestamp}"
        dataset_path = os.path.join(args.out_dir, f"{file_stem}.npz")
        buffer.save(dataset_path)

        # Save minimal metadata
        meta = {
            "domain": args.domain,
            "task": args.task,
            "algo": args.algo,
            "seed": args.seed,
            "ckpt_index": idx,
            "trajectories_per_ckpt": args.trajectories_per_ckpt,
            "steps_per_trajectory": args.steps_per_trajectory,
            "total_trajectories": len(buffer),
            "total_steps": len(buffer) * args.steps_per_trajectory,
            "render": bool(args.render),
        }
        import pickle
        with open(os.path.join(args.out_dir, f"{file_stem}_metadata.pkl"), "wb") as f:
            pickle.dump(meta, f)

        print(f"[INFO] Saved ckpt {idx}: {dataset_path} ({len(buffer)} trajectories)")

    if viewer is not None:
        viewer.close()


if __name__ == "__main__":
    main()
class DmControlGymWrapper(gym.Env):
    """A minimal Gym/Gymnasium wrapper for dm_control suite tasks with flattened obs."""

    metadata = {"render_modes": ["rgb_array"], "render_fps": 60}

    def __init__(self, domain: str, task: str, seed: int | None = None):
        super().__init__()
        self._domain = domain
        self._task = task
        self._seed = seed if seed is not None else 0
        self._env = suite.load(domain_name=domain, task_name=task, task_kwargs={"random": self._seed})

        # Build observation space by flattening dict in sorted key order
        example_obs = self._env.reset().observation
        self._obs_keys = sorted(example_obs.keys())
        obs_size = int(np.sum([np.asarray(example_obs[k]).size for k in self._obs_keys]))
        # Use unbounded space; algorithms usually normalize internally
        self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(obs_size,), dtype=np.float32)

        # Action space from spec
        action_spec = self._env.action_spec()
        self._act_low = np.asarray(action_spec.minimum, dtype=np.float32)
        self._act_high = np.asarray(action_spec.maximum, dtype=np.float32)
        self.action_space = gym.spaces.Box(low=self._act_low, high=self._act_high, shape=action_spec.shape, dtype=np.float32)

    def seed(self, seed: int | None = None):
        """Reseed by rebuilding the underlying dm_control env (its RNG is fixed at load)."""
        if seed is not None:
            self._seed = seed
        # dm_control uses task_kwargs random; re-create env to apply new seed
        self._env = suite.load(domain_name=self._domain, task_name=self._task, task_kwargs={"random": self._seed})

    def _flatten_obs(self, obs_dict: Dict[str, np.ndarray]) -> np.ndarray:
        """Flatten a dm_control observation dict in sorted-key order to float32."""
        parts: List[np.ndarray] = []
        for k in self._obs_keys:
            v = np.asarray(obs_dict[k], dtype=np.float32).ravel()
            parts.append(v)
        return np.concatenate(parts, axis=0).astype(np.float32)

    def reset(self, *, seed: int | None = None, options: dict | None = None):
        if seed is not None:
            self.seed(seed)
        ts = self._env.reset()
        obs = self._flatten_obs(ts.observation)
        info = {}
        return obs, info

    def step(self, action: np.ndarray):
        action = np.asarray(action, dtype=np.float32)
        action = np.clip(action, self._act_low, self._act_high)
        ts = self._env.step(action)
        obs = self._flatten_obs(ts.observation)
        reward = 0.0 if ts.reward is None else float(ts.reward)
        terminated = bool(ts.last())
        truncated = False  # dm_control provides a single 'last' flag
        info = {}
        if terminated:
            # dm_control envs typically auto-reset; we return terminal step and let VecEnv reset
            pass
        return obs, reward, terminated, truncated, info

    def render(self):
        # Return an RGB array
        return self._env.physics.render(height=480, width=640, camera_id=0)


ALGOS = {
    "sac": SAC,
    "ppo": PPO,
    "td3": TD3,
}


class PeriodicCkptCallback(BaseCallback):
    """Save a policy checkpoint every fixed number of environment timesteps.

    Saves to <save_root>/<domain>/<task>/ckpt-<k>.pt where k starts from 1.
    """

    def __init__(self, save_root: str, domain: str, task: str, interval: int = 10_000, verbose: int = 1):
        super().__init__(verbose)
        self.save_root = save_root
        self.domain = domain
        self.task = task
        self.interval = interval
        self.saved_count = 0
        self.target_dir = os.path.join(save_root, domain, task)
        os.makedirs(self.target_dir, exist_ok=True)

    def _on_step(self) -> bool:
        # num_timesteps is global and advances by n_envs per callback call, so
        # testing `num_timesteps % interval == 0` silently skips checkpoints
        # whenever n_envs does not divide interval. Instead, save every time
        # we cross the next interval boundary (possibly several in one call).
        while self.num_timesteps >= (self.saved_count + 1) * self.interval:
            self.saved_count += 1
            path = os.path.join(self.target_dir, f"ckpt-{self.saved_count}.pt")
            payload = {
                "algo": self.model.__class__.__name__,
                "domain": self.domain,
                "task": self.task,
                "num_timesteps": int(self.num_timesteps),
                "policy_state_dict": self.model.policy.state_dict(),
            }
            torch.save(payload, path)
            if self.verbose:
                print(f"[CKPT] Saved checkpoint #{self.saved_count} at {self.num_timesteps} steps -> {path}")
        return True
total_timesteps: int, n_envs: int, seed: int, device: str, out_dir: str): + # Build vectorized envs + def make_env_fn(rank: int): + def _thunk(): + env = DmControlGymWrapper(domain=domain, task=task, seed=seed + rank) + return env + return _thunk + + vec_env = make_vec_env(make_env_fn(0), n_envs=n_envs, seed=seed, vec_env_cls=SubprocVecEnv if n_envs > 1 else DummyVecEnv) + + ALGO_CLS = ALGOS[algo] + policy = "MlpPolicy" + model = ALGO_CLS(policy, vec_env, verbose=1, seed=seed, device=device) + + # Periodic checkpoint every 10,000 steps + ckpt_cb = PeriodicCkptCallback(save_root=out_dir, domain=domain, task=task, interval=10_000, verbose=1) + + print(f"[INFO] Start training {algo.upper()} on {domain}/{task} for {total_timesteps} steps with {n_envs} envs") + model.learn(total_timesteps=total_timesteps, progress_bar=True, callback=ckpt_cb) + + os.makedirs(out_dir, exist_ok=True) + timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + save_stem = f"sb3_{algo}_{domain}-{task}_seed{seed}_{timestamp}" + save_path = os.path.join(out_dir, save_stem) + + model.save(save_path) + print(f"[INFO] Saved model to: {save_path}.zip") + + vec_env.close() + + +def parse_args(): + parser = argparse.ArgumentParser(description="Train dm_control task with Stable Baselines3 and save weights") + parser.add_argument("--domain", type=str, default="cheetah", help="dm_control domain (e.g., cheetah, quadruped)") + parser.add_argument("--task", type=str, default="run", help="dm_control task (e.g., run, walk)") + parser.add_argument("--algo", type=str, choices=list(ALGOS.keys()), default="sac", help="RL algorithm") + parser.add_argument("--total_timesteps", type=int, default=500_000, help="Total training steps") + parser.add_argument("--n_envs", type=int, default=1, help="Number of parallel envs") + parser.add_argument("--seed", type=int, default=0, help="Random seed") + parser.add_argument("--device", type=str, default="auto", help="Device: cpu, cuda, or auto") + parser.add_argument( + 
"--out_dir", + type=str, + default=os.path.join("/home/lau/sim/DynaTraj", "weights"), + help="Directory to save trained weights", + ) + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + train( + domain=args.domain, + task=args.task, + algo=args.algo, + total_timesteps=args.total_timesteps, + n_envs=args.n_envs, + seed=args.seed, + device=args.device, + out_dir=args.out_dir, + ) \ No newline at end of file diff --git a/weights/cheetah/run/ckpt-1.pt b/weights/cheetah/run/ckpt-1.pt new file mode 100644 index 0000000000000000000000000000000000000000..c2b24ae38e3485b3b60de9094f469614907aae30 --- /dev/null +++ b/weights/cheetah/run/ckpt-1.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0744eb2316e53296a7bb3811589465f1914b795240e1986654e5b82bd2d6c82 +size 1459154 diff --git a/weights/cheetah/run/ckpt-10.pt b/weights/cheetah/run/ckpt-10.pt new file mode 100644 index 0000000000000000000000000000000000000000..341c97fad10eec25adb891af524d6d2f2915af6e --- /dev/null +++ b/weights/cheetah/run/ckpt-10.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab0240eeb12715f4ba0f8b3bec6ba8f8725ca4fa75d57cb9b29b11c45203264f +size 1460598 diff --git a/weights/cheetah/run/ckpt-11.pt b/weights/cheetah/run/ckpt-11.pt new file mode 100644 index 0000000000000000000000000000000000000000..215d84fc8533a268a2e2159828da96cbdb586542 --- /dev/null +++ b/weights/cheetah/run/ckpt-11.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f53d640d8594161a2dd0da4a3b6d4ee966dab8caf5e90370ecf5f14ba996622 +size 1460598 diff --git a/weights/cheetah/run/ckpt-12.pt b/weights/cheetah/run/ckpt-12.pt new file mode 100644 index 0000000000000000000000000000000000000000..5c5b745ffadff4b83cac4bb8306d5a6227a4493c --- /dev/null +++ b/weights/cheetah/run/ckpt-12.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:125da2ecf17e53970cbcf75077e85784201e318daff39ad6e9f494ca0bd35718 +size 1460598 
diff --git a/weights/cheetah/run/ckpt-13.pt b/weights/cheetah/run/ckpt-13.pt new file mode 100644 index 0000000000000000000000000000000000000000..2bd30d0add97fbc7f428272f7845b48299baf74a --- /dev/null +++ b/weights/cheetah/run/ckpt-13.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c6334cbc92cc2b4efed8b4d5691159a441dc1caf90fe1e9f5c3a4be39f913b7 +size 1460598 diff --git a/weights/cheetah/run/ckpt-14.pt b/weights/cheetah/run/ckpt-14.pt new file mode 100644 index 0000000000000000000000000000000000000000..fb46c5845fd31af09b349648f2bd435c7cb6598d --- /dev/null +++ b/weights/cheetah/run/ckpt-14.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a111154a97f3e9430cc4c8755bc5f8737a1c69762127c205ee63306d4fccd8d +size 1460598 diff --git a/weights/cheetah/run/ckpt-15.pt b/weights/cheetah/run/ckpt-15.pt new file mode 100644 index 0000000000000000000000000000000000000000..5738ef769b01975c5922001dd23aa917ac5143b5 --- /dev/null +++ b/weights/cheetah/run/ckpt-15.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b325021e2192d6c9f1d049e5a394133fd6eaa790bee8245b31620e80f6e0970a +size 1460598 diff --git a/weights/cheetah/run/ckpt-16.pt b/weights/cheetah/run/ckpt-16.pt new file mode 100644 index 0000000000000000000000000000000000000000..9e287ce01b5acbcf54ef513cc7c015093148605a --- /dev/null +++ b/weights/cheetah/run/ckpt-16.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e1b2cdb115dc8de995da07a795f0b53097f5cf2202cf324e4b9241eebd89386 +size 1460598 diff --git a/weights/cheetah/run/ckpt-17.pt b/weights/cheetah/run/ckpt-17.pt new file mode 100644 index 0000000000000000000000000000000000000000..9eb600a0945ca725aaa0ea553443b653260aedfd --- /dev/null +++ b/weights/cheetah/run/ckpt-17.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c7d38bfafd40fb232168155bb45f5104b5784cfdc130ae0a6a1333d1d442d25 +size 1460598 diff --git a/weights/cheetah/run/ckpt-18.pt 
b/weights/cheetah/run/ckpt-18.pt new file mode 100644 index 0000000000000000000000000000000000000000..193e16931947f27b2a0d9491a2f29bbd1b12aa03 --- /dev/null +++ b/weights/cheetah/run/ckpt-18.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:416cee415fa810944cc8481187c37ecf6804b9182ac8b1d31033062f939662e3 +size 1460598 diff --git a/weights/cheetah/run/ckpt-19.pt b/weights/cheetah/run/ckpt-19.pt new file mode 100644 index 0000000000000000000000000000000000000000..c0e3c63dd7f1f99ae7efb5d78f98220ebb1a9308 --- /dev/null +++ b/weights/cheetah/run/ckpt-19.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eafb01b3ffd9b7b9c95d55957ef4d84adca155acac7f420515ec077b1677ab0b +size 1460598 diff --git a/weights/cheetah/run/ckpt-2.pt b/weights/cheetah/run/ckpt-2.pt new file mode 100644 index 0000000000000000000000000000000000000000..34e5337190e6dde507a321d9dc645b524e4c159b --- /dev/null +++ b/weights/cheetah/run/ckpt-2.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65b441d70cfd553323456c5ba844afc0c8e4a7e15e637eedf6a624f31800e3a2 +size 1459154 diff --git a/weights/cheetah/run/ckpt-20.pt b/weights/cheetah/run/ckpt-20.pt new file mode 100644 index 0000000000000000000000000000000000000000..dfd45e9a8875ef9d877e0dcf1361465a5e7c5cad --- /dev/null +++ b/weights/cheetah/run/ckpt-20.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e7be04d7c13c84add27a93c0c6731cf9fa58cac5a8d6b3e2f441a29b2a7d4c2 +size 1460598 diff --git a/weights/cheetah/run/ckpt-21.pt b/weights/cheetah/run/ckpt-21.pt new file mode 100644 index 0000000000000000000000000000000000000000..0d5207d929071eb717375aa73b26a02985b68485 --- /dev/null +++ b/weights/cheetah/run/ckpt-21.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:193aeb405461d5cd6e0d1073644b03995f53bcd1e7ab8221356deb0e1408cffe +size 1460598 diff --git a/weights/cheetah/run/ckpt-22.pt b/weights/cheetah/run/ckpt-22.pt new file mode 100644 
index 0000000000000000000000000000000000000000..fb0f8dc2fa21c36783fd461c2cf1085b80ec7ec7 --- /dev/null +++ b/weights/cheetah/run/ckpt-22.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1125e4a3768c66cb38d5049113efd0ca50d984649ada8da1530d33846602c98d +size 1460598 diff --git a/weights/cheetah/run/ckpt-23.pt b/weights/cheetah/run/ckpt-23.pt new file mode 100644 index 0000000000000000000000000000000000000000..91676aa73050be0aecfb8c32cb4c774c372e71be --- /dev/null +++ b/weights/cheetah/run/ckpt-23.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d06c5d9d14a729873b79a4e355ec3873f2c29f251f04de26b2c6badbc726e271 +size 1460598 diff --git a/weights/cheetah/run/ckpt-24.pt b/weights/cheetah/run/ckpt-24.pt new file mode 100644 index 0000000000000000000000000000000000000000..4bf5db6dee967490c83d825c5b776055884a6a8a --- /dev/null +++ b/weights/cheetah/run/ckpt-24.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0639478f8c7d1ac6a938540a32692e396a2711400908900d40d54b1c1c72731e +size 1460598 diff --git a/weights/cheetah/run/ckpt-25.pt b/weights/cheetah/run/ckpt-25.pt new file mode 100644 index 0000000000000000000000000000000000000000..cc4b831b43e8f75a9a70b038ceb3c317296e2c2a --- /dev/null +++ b/weights/cheetah/run/ckpt-25.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cdc5913accef1dd2646c0490ad4fbd61ae84bbb52a316c004a38499a6c33d8a +size 1460598 diff --git a/weights/cheetah/run/ckpt-26.pt b/weights/cheetah/run/ckpt-26.pt new file mode 100644 index 0000000000000000000000000000000000000000..b8e600375a6c65b42049a590d1044718ef31e1d8 --- /dev/null +++ b/weights/cheetah/run/ckpt-26.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0305e88d849bf903c3f1accc3305bc94b491c4b9768a1393c1e991978ce4eeec +size 1460598 diff --git a/weights/cheetah/run/ckpt-27.pt b/weights/cheetah/run/ckpt-27.pt new file mode 100644 index 
0000000000000000000000000000000000000000..907a9b1ba1fc84444322183d36f768a1d8e56dce --- /dev/null +++ b/weights/cheetah/run/ckpt-27.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7772f7939aafa0824d57462f7706d5694d0adb9c5c71b3334e364f8e821c8054 +size 1460598 diff --git a/weights/cheetah/run/ckpt-28.pt b/weights/cheetah/run/ckpt-28.pt new file mode 100644 index 0000000000000000000000000000000000000000..1a03bf63751b8a66f5b6ac6d35434a14e0b134cb --- /dev/null +++ b/weights/cheetah/run/ckpt-28.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c80d7a3c7fa5de07ae9d85a2cea2f09f68587b6672c556110f5749492fdb8296 +size 1460598 diff --git a/weights/cheetah/run/ckpt-29.pt b/weights/cheetah/run/ckpt-29.pt new file mode 100644 index 0000000000000000000000000000000000000000..89ec3bf6c1942dc13b2b5d017787f387df71ce28 --- /dev/null +++ b/weights/cheetah/run/ckpt-29.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b8563160a488c65265aafd1b1dd5b9445ad590cac5818b137dcd8145b6e96e1 +size 1460598 diff --git a/weights/cheetah/run/ckpt-3.pt b/weights/cheetah/run/ckpt-3.pt new file mode 100644 index 0000000000000000000000000000000000000000..180a9fb10815994d907fc7966e2d658cfd478ab4 --- /dev/null +++ b/weights/cheetah/run/ckpt-3.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05dadc4b5300a18d0b90c1228ceb68c80b45be90effdd80fe9880fbae54471fc +size 1459154 diff --git a/weights/cheetah/run/ckpt-30.pt b/weights/cheetah/run/ckpt-30.pt new file mode 100644 index 0000000000000000000000000000000000000000..ca5365f6cd441fbf38bd51907d4a73af51b12c3e --- /dev/null +++ b/weights/cheetah/run/ckpt-30.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd1e1bd5b24961734b43e2732a8440030203b9987c2a04c9aaf274c922e74943 +size 1460598 diff --git a/weights/cheetah/run/ckpt-31.pt b/weights/cheetah/run/ckpt-31.pt new file mode 100644 index 
0000000000000000000000000000000000000000..cbdd24318a4863100ff8cc9960dbc0e7e09b3f07 --- /dev/null +++ b/weights/cheetah/run/ckpt-31.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47c2b5479e4bf6940a333ec7109995a94ed02526e102740f5cc923d9d30e2203 +size 1460598 diff --git a/weights/cheetah/run/ckpt-32.pt b/weights/cheetah/run/ckpt-32.pt new file mode 100644 index 0000000000000000000000000000000000000000..c7f4e08d86d588a40a76d2f64bf66c62c8070da9 --- /dev/null +++ b/weights/cheetah/run/ckpt-32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:140b1c07a07562830610b587ad49b21d717afd40390496e82e5e0aa91b1229a6 +size 1460598 diff --git a/weights/cheetah/run/ckpt-33.pt b/weights/cheetah/run/ckpt-33.pt new file mode 100644 index 0000000000000000000000000000000000000000..f47bd04135645e7e6fc1a71b01b616ae5f4d04c6 --- /dev/null +++ b/weights/cheetah/run/ckpt-33.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bb557e94f529f4cbd098cd21b11f9de098ab91546d1263f1094c97e2fb71948 +size 1460598 diff --git a/weights/cheetah/run/ckpt-34.pt b/weights/cheetah/run/ckpt-34.pt new file mode 100644 index 0000000000000000000000000000000000000000..45a95b4a7fc2ca6bba9b32df5d98e843ba728d8d --- /dev/null +++ b/weights/cheetah/run/ckpt-34.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd64da97d4f9ec48f051a5387b0834d2d49ca0ba8cd59bf8650f7dde76530df5 +size 1460598 diff --git a/weights/cheetah/run/ckpt-35.pt b/weights/cheetah/run/ckpt-35.pt new file mode 100644 index 0000000000000000000000000000000000000000..dbe562b0f59599a9ac92059d7e532f392749cd54 --- /dev/null +++ b/weights/cheetah/run/ckpt-35.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95a0abbd8aef1ad5edb6d243e43211d47ac62074d12f511a912833c0cd9f1351 +size 1460598 diff --git a/weights/cheetah/run/ckpt-36.pt b/weights/cheetah/run/ckpt-36.pt new file mode 100644 index 
0000000000000000000000000000000000000000..2cfff8fc679c91b5722e8cc7fb33e7d208c08b13 --- /dev/null +++ b/weights/cheetah/run/ckpt-36.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce685c3e09e2aff6b090551cb73616cdd367041a7e7003c1c29ccc2418a92e8d +size 1460598 diff --git a/weights/cheetah/run/ckpt-37.pt b/weights/cheetah/run/ckpt-37.pt new file mode 100644 index 0000000000000000000000000000000000000000..a06f19ade12fb0cda7095fc98d94c77003c84cfa --- /dev/null +++ b/weights/cheetah/run/ckpt-37.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b5d14399d70e9bd4ce15b48f4893d7e91573b733851aafdc93ba935c6d6917f +size 1460598 diff --git a/weights/cheetah/run/ckpt-38.pt b/weights/cheetah/run/ckpt-38.pt new file mode 100644 index 0000000000000000000000000000000000000000..ff3fa50e3cb6df07ce26a585ff9cd67662eaca88 --- /dev/null +++ b/weights/cheetah/run/ckpt-38.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ec0976035b637fd99fcd87aa2ab686a9c03735af8167d8ceba8694cca232608 +size 1460598 diff --git a/weights/cheetah/run/ckpt-39.pt b/weights/cheetah/run/ckpt-39.pt new file mode 100644 index 0000000000000000000000000000000000000000..747fad8ed109b4e0d97a9c430006f4645833ac3f --- /dev/null +++ b/weights/cheetah/run/ckpt-39.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7afea2be51084fb85760fe0e6d766dc927955e68169ac423d8ba48cefc39bed0 +size 1460598 diff --git a/weights/cheetah/run/ckpt-4.pt b/weights/cheetah/run/ckpt-4.pt new file mode 100644 index 0000000000000000000000000000000000000000..97c99a2a3ba39c2ca073f6d39dbe44f81c9f2f8b --- /dev/null +++ b/weights/cheetah/run/ckpt-4.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adbb93d31aec73e5f529557a3f4000a89f64ded4595b481c64dc16af9f1e0746 +size 1459154 diff --git a/weights/cheetah/run/ckpt-40.pt b/weights/cheetah/run/ckpt-40.pt new file mode 100644 index 
0000000000000000000000000000000000000000..06708fe2b96560aa3d38ab8d7eaa2acf6c519f69 --- /dev/null +++ b/weights/cheetah/run/ckpt-40.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f9f4cb9afb7177b124cf10b20d9c70627e050a03ccb77afdcfbaa14b9cd53ba +size 1460598 diff --git a/weights/cheetah/run/ckpt-41.pt b/weights/cheetah/run/ckpt-41.pt new file mode 100644 index 0000000000000000000000000000000000000000..7cda4bae71746dcff298b24a33cb1fdcdbc30036 --- /dev/null +++ b/weights/cheetah/run/ckpt-41.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8c278656e5da023ea8841aa822f6e57c95a8c422f709f227b6756afe87e99e5 +size 1460598 diff --git a/weights/cheetah/run/ckpt-42.pt b/weights/cheetah/run/ckpt-42.pt new file mode 100644 index 0000000000000000000000000000000000000000..1f1098db665937162bcf8a7ec6bc979c38f5a03a --- /dev/null +++ b/weights/cheetah/run/ckpt-42.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03ee5e7f27043f6b19d840dae30f971e05b2c92e4c5a7e8685796da4450a08e8 +size 1460598 diff --git a/weights/cheetah/run/ckpt-43.pt b/weights/cheetah/run/ckpt-43.pt new file mode 100644 index 0000000000000000000000000000000000000000..60367046f6a3cb5b90b5e6f04e83b41f8732223c --- /dev/null +++ b/weights/cheetah/run/ckpt-43.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ed13f0b0a480bda85ab9f63f542b6a26e56961833eb891681abbc7806877fc6 +size 1460598 diff --git a/weights/cheetah/run/ckpt-44.pt b/weights/cheetah/run/ckpt-44.pt new file mode 100644 index 0000000000000000000000000000000000000000..dccbb8b27adc8a27884784be3bc4cca28517fe52 --- /dev/null +++ b/weights/cheetah/run/ckpt-44.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3374fede1f3a0ed75c23b54d5813c1d3d64d20dff1276a9b97f402f88b7fd24b +size 1460598 diff --git a/weights/cheetah/run/ckpt-45.pt b/weights/cheetah/run/ckpt-45.pt new file mode 100644 index 
0000000000000000000000000000000000000000..7764115bf9e319e56424b468d3b9258183529ae2 --- /dev/null +++ b/weights/cheetah/run/ckpt-45.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ff8a423837799c2d8853598abdd7ea3ce93db5a08645050f68089cc55a8f6e5 +size 1460598 diff --git a/weights/cheetah/run/ckpt-46.pt b/weights/cheetah/run/ckpt-46.pt new file mode 100644 index 0000000000000000000000000000000000000000..bff5b5d377ddd463a83c13695966a38e8fcdb56e --- /dev/null +++ b/weights/cheetah/run/ckpt-46.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23fb3bfc0e2ad3d095d9bb3f8e9dcdf15aa3ad6d458e90d9f0bd6059cc7de327 +size 1460598 diff --git a/weights/cheetah/run/ckpt-47.pt b/weights/cheetah/run/ckpt-47.pt new file mode 100644 index 0000000000000000000000000000000000000000..40c02488734ed5d50f964f1d4eff080d1072a5ca --- /dev/null +++ b/weights/cheetah/run/ckpt-47.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a5fd79c53fb1845dee5ffbcb71b81f300af313df95f4023c38e8313089af27b +size 1460598 diff --git a/weights/cheetah/run/ckpt-48.pt b/weights/cheetah/run/ckpt-48.pt new file mode 100644 index 0000000000000000000000000000000000000000..380ea752196bd3915d2d011e0969aa3c8981b38f --- /dev/null +++ b/weights/cheetah/run/ckpt-48.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71f66edf306fee8a66a605358e5f301391bcb3439cdf26cacd77258392119671 +size 1460598 diff --git a/weights/cheetah/run/ckpt-49.pt b/weights/cheetah/run/ckpt-49.pt new file mode 100644 index 0000000000000000000000000000000000000000..520d7723cf1cc05d0368943dfa9788cd0d319f5c --- /dev/null +++ b/weights/cheetah/run/ckpt-49.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2efbcf02c4db0fa55cfec5c39cefa5a6b87118939f68b09c14f5878e07d8ed66 +size 1460598 diff --git a/weights/cheetah/run/ckpt-5.pt b/weights/cheetah/run/ckpt-5.pt new file mode 100644 index 
0000000000000000000000000000000000000000..e718ceff467a2be07a56640fcaf8bc7bc03788ed --- /dev/null +++ b/weights/cheetah/run/ckpt-5.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb8028ef5c9a2b79effdaa1277ea526822b96bc777b8a63f286567c39e6a3655 +size 1459154 diff --git a/weights/cheetah/run/ckpt-50.pt b/weights/cheetah/run/ckpt-50.pt new file mode 100644 index 0000000000000000000000000000000000000000..d568d4838db9e53891128a17263561df99219836 --- /dev/null +++ b/weights/cheetah/run/ckpt-50.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11982b5af1d52dd53c239d16e28e019a5a2b3e4e9a79db62d80cf12bade5501c +size 1460598 diff --git a/weights/cheetah/run/ckpt-6.pt b/weights/cheetah/run/ckpt-6.pt new file mode 100644 index 0000000000000000000000000000000000000000..39a037c662aa996b049d8206fe7b10a9ae235225 --- /dev/null +++ b/weights/cheetah/run/ckpt-6.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40d2130bdc753e154c4ae14ca7c433afcefbefcc023d5740c68d941838162f40 +size 1459154 diff --git a/weights/cheetah/run/ckpt-7.pt b/weights/cheetah/run/ckpt-7.pt new file mode 100644 index 0000000000000000000000000000000000000000..c82e19e3ed0b1d9ba8e5fb344ed64d9a5ea5983c --- /dev/null +++ b/weights/cheetah/run/ckpt-7.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73bed665712f9a0c362ae00bec9cc4d1e850b7660f169bb4b8f2d01065d5d88e +size 1459218 diff --git a/weights/cheetah/run/ckpt-8.pt b/weights/cheetah/run/ckpt-8.pt new file mode 100644 index 0000000000000000000000000000000000000000..4a12b8228603553c0b75b0f59c7b421b77bdd720 --- /dev/null +++ b/weights/cheetah/run/ckpt-8.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43ae086793cb713f1288393dba43c424a3b064947c85152d1d6efb2b8586e64b +size 1459218 diff --git a/weights/cheetah/run/ckpt-9.pt b/weights/cheetah/run/ckpt-9.pt new file mode 100644 index 
0000000000000000000000000000000000000000..4c1db1d485a2621e7c8877e06b900af31a27073b --- /dev/null +++ b/weights/cheetah/run/ckpt-9.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abfd0fe67083853f62b1c189ce684cf668529d6ce2c5d051da7a610307ca11f5 +size 1459218 diff --git a/weights/sb3_sac_cheetah-run_seed0_2025-08-08_01-21-11.zip b/weights/sb3_sac_cheetah-run_seed0_2025-08-08_01-21-11.zip new file mode 100644 index 0000000000000000000000000000000000000000..cef6553cc9e152b268848041e1f02caad2bc5d26 --- /dev/null +++ b/weights/sb3_sac_cheetah-run_seed0_2025-08-08_01-21-11.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:205179613d98006a7cebd2ff55cdf22bb698c8b4abee27779b4816de5aef2bc3 +size 3240333