import argparse
import os
from datetime import datetime
from typing import Dict, List

import numpy as np

try:
    from dm_control import suite
except Exception as e:
    raise RuntimeError(
        "dm_control is required. Install via: pip install dm-control mujoco"
    ) from e

try:
    import gymnasium as gym
except Exception:
    import gym

try:
    from stable_baselines3 import SAC, PPO, TD3
    from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv
    from stable_baselines3.common.env_util import make_vec_env
    from stable_baselines3.common.callbacks import BaseCallback
except Exception as e:
    raise RuntimeError(
        "stable-baselines3 is required. Install via: pip install stable-baselines3"
    ) from e

import torch


class DmControlGymWrapper(gym.Env):
    """A minimal Gym/Gymnasium wrapper for dm_control suite tasks with flattened observations."""

    metadata = {"render_modes": ["rgb_array"], "render_fps": 60}

    def __init__(self, domain: str, task: str, seed: int | None = None):
        super().__init__()
        self._domain = domain
        self._task = task
        self._seed = seed if seed is not None else 0
        self._env = suite.load(domain_name=domain, task_name=task, task_kwargs={"random": self._seed})

        # Probe one reset to discover the observation layout; sorting the keys
        # gives the flattened vector a stable ordering across episodes.
        example_obs = self._env.reset().observation
        self._obs_keys = sorted(example_obs.keys())
        obs_size = int(np.sum([np.asarray(example_obs[k]).size for k in self._obs_keys]))

        self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(obs_size,), dtype=np.float32)

        # Mirror dm_control's bounded action spec in a Box space.
        action_spec = self._env.action_spec()
        self._act_low = np.asarray(action_spec.minimum, dtype=np.float32)
        self._act_high = np.asarray(action_spec.maximum, dtype=np.float32)
        self.action_space = gym.spaces.Box(low=self._act_low, high=self._act_high, shape=action_spec.shape, dtype=np.float32)

    def seed(self, seed: int | None = None):
        if seed is not None:
            self._seed = seed
        # dm_control seeds at construction time, so reload the task for the new
        # seed to take effect.
        self._env = suite.load(domain_name=self._domain, task_name=self._task, task_kwargs={"random": self._seed})

    def _flatten_obs(self, obs_dict: Dict[str, np.ndarray]) -> np.ndarray:
        parts: List[np.ndarray] = []
        for k in self._obs_keys:
            v = np.asarray(obs_dict[k], dtype=np.float32).ravel()
            parts.append(v)
        return np.concatenate(parts, axis=0).astype(np.float32)

    def reset(self, *, seed: int | None = None, options: dict | None = None):
        if seed is not None:
            self.seed(seed)
        ts = self._env.reset()
        obs = self._flatten_obs(ts.observation)
        info = {}
        return obs, info

    def step(self, action: np.ndarray):
        action = np.asarray(action, dtype=np.float32)
        action = np.clip(action, self._act_low, self._act_high)
        ts = self._env.step(action)
        obs = self._flatten_obs(ts.observation)
        reward = 0.0 if ts.reward is None else float(ts.reward)
        # dm_control signals both true terminations and time limits via ts.last().
        # A discount of 0.0 marks a real termination; a nonzero discount on the
        # final step is a time-limit truncation. The distinction matters for
        # value bootstrapping in algorithms like SAC and TD3.
        terminated = bool(ts.last() and ts.discount == 0.0)
        truncated = bool(ts.last() and ts.discount != 0.0)
        info = {}
        return obs, reward, terminated, truncated, info

    def render(self):
        return self._env.physics.render(height=480, width=640, camera_id=0)
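

# Quick smoke test of the wrapper (a sketch; assumes the default cheetah/run
# task from the dm_control suite is available):
#
#   env = DmControlGymWrapper("cheetah", "run", seed=0)
#   obs, info = env.reset()
#   obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
#   frame = env.render()  # -> (480, 640, 3) uint8 RGB array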


ALGOS = {
    "sac": SAC,
    "ppo": PPO,
    "td3": TD3,
}


class PeriodicCkptCallback(BaseCallback):
    """Save a policy checkpoint every fixed number of timesteps.

    Saves to weights/<domain>/<task>/ckpt-<k>.pt, where k starts from 1.
    """

    def __init__(self, save_root: str, domain: str, task: str, interval: int = 10_000, verbose: int = 1):
        super().__init__(verbose)
        self.save_root = save_root
        self.domain = domain
        self.task = task
        self.interval = interval
        self.saved_count = 0
        self.target_dir = os.path.join(save_root, domain, task)
        os.makedirs(self.target_dir, exist_ok=True)

    def _on_step(self) -> bool:
        # num_timesteps advances by n_envs per call, so an exact modulo test can
        # step over the boundary; save whenever the next interval is crossed.
        if self.num_timesteps >= (self.saved_count + 1) * self.interval:
            self.saved_count += 1
            path = os.path.join(self.target_dir, f"ckpt-{self.saved_count}.pt")
            payload = {
                "algo": self.model.__class__.__name__,
                "domain": self.domain,
                "task": self.task,
                "num_timesteps": int(self.num_timesteps),
                "policy_state_dict": self.model.policy.state_dict(),
            }
            torch.save(payload, path)
            if self.verbose:
                print(f"[CKPT] Saved checkpoint #{self.saved_count} at {self.num_timesteps} steps -> {path}")
        return True
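

# Restoring a checkpoint later (a sketch; rebuild the model with the same
# algorithm and policy class, and a matching env, before loading the weights):
#
#   env = DmControlGymWrapper("cheetah", "run")
#   model = SAC("MlpPolicy", env, device="cpu")
#   payload = torch.load("weights/cheetah/run/ckpt-3.pt")
#   model.policy.load_state_dict(payload["policy_state_dict"])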


def train(domain: str, task: str, algo: str, total_timesteps: int, n_envs: int, seed: int, device: str, out_dir: str):

    def make_env_fn(rank: int):
        def _thunk():
            return DmControlGymWrapper(domain=domain, task=task, seed=seed + rank)
        return _thunk

    # make_vec_env calls the factory once per env copy and, given `seed`, re-seeds
    # each copy (seed + rank) on its first reset, so the rank-0 factory suffices.
    vec_env = make_vec_env(make_env_fn(0), n_envs=n_envs, seed=seed, vec_env_cls=SubprocVecEnv if n_envs > 1 else DummyVecEnv)

    ALGO_CLS = ALGOS[algo]
    policy = "MlpPolicy"
    model = ALGO_CLS(policy, vec_env, verbose=1, seed=seed, device=device)

    ckpt_cb = PeriodicCkptCallback(save_root=out_dir, domain=domain, task=task, interval=10_000, verbose=1)

    print(f"[INFO] Start training {algo.upper()} on {domain}/{task} for {total_timesteps} steps with {n_envs} envs")
    model.learn(total_timesteps=total_timesteps, progress_bar=True, callback=ckpt_cb)

    os.makedirs(out_dir, exist_ok=True)
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    save_stem = f"sb3_{algo}_{domain}-{task}_seed{seed}_{timestamp}"
    save_path = os.path.join(out_dir, save_stem)

    model.save(save_path)
    print(f"[INFO] Saved model to: {save_path}.zip")

    vec_env.close()


def parse_args():
    parser = argparse.ArgumentParser(description="Train a dm_control task with Stable Baselines3 and save the weights")
    parser.add_argument("--domain", type=str, default="cheetah", help="dm_control domain (e.g., cheetah, quadruped)")
    parser.add_argument("--task", type=str, default="run", help="dm_control task (e.g., run, walk)")
    parser.add_argument("--algo", type=str, choices=list(ALGOS.keys()), default="sac", help="RL algorithm")
    parser.add_argument("--total_timesteps", type=int, default=500_000, help="Total training steps")
    parser.add_argument("--n_envs", type=int, default=1, help="Number of parallel envs")
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument("--device", type=str, default="auto", help="Device: cpu, cuda, or auto")
    parser.add_argument(
        "--out_dir",
        type=str,
        default=os.path.join("/home/lau/sim/DynaTraj", "weights"),
        help="Directory to save trained weights",
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    train(
        domain=args.domain,
        task=args.task,
        algo=args.algo,
        total_timesteps=args.total_timesteps,
        n_envs=args.n_envs,
        seed=args.seed,
        device=args.device,
        out_dir=args.out_dir,
    )
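
# Example invocation (a sketch; substitute the actual filename of this script):
#   python train_dm_control.py --domain cheetah --task run --algo sac \
#       --total_timesteps 500000 --n_envs 4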