"""
Train LeRobot imitation-learning policies on an SO-101 dataset with W&B logging.

Install:
    pip install lerobot huggingface_hub wandb torch pyyaml
    pip install 'lerobot[diffusion]'  # needed for diffusion policy configs

Login once:
    huggingface-cli login  # optional for public datasets, useful generally
    wandb login

Run:
    python train_example.py --config-name act_arch_wide_192
    python train_example.py --config-name act_arch_wide_192 \
        --dataset-repos user/dataset_a user/dataset_b --aggregate-repo user/a_b_merged
    python train_example.py --config-name act_arch_wide_192_hil_finetune

Fine-tuning:
    Set `pretrained_policy_path` to a previous `pretrained_model` directory. This
    starts a new run initialized from that checkpoint. It is different from
    LeRobot `resume`, which continues the exact same interrupted run.
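
Config file:
    `train_configs.yaml` must define a `configs` mapping and may define `defaults`
    and per-policy-type `policy_defaults`. The selected config is built by merging
    `defaults`, then `policy_defaults[<policy type>]`, then the named entry (see
    `load_sweep_configs` and `config_by_name`). Use `--list-configs` to print the
    available names. A minimal illustrative sketch (keys under `policy` and all
    values are placeholders, not a reference):

        defaults:
          steps: 20000
          batch_size: 32
          wandb_project: lerobot-so101-act-lcc
          policy:
            type: act
        policy_defaults:
          act:
            policy:
              chunk_size: 100
        configs:
          act_arch_wide_192:
            job_name: act_arch_wide_192
            policy:
              dim_model: 192
          act_arch_wide_192_hil_finetune:
            job_name: act_arch_wide_192_hil_finetune
            pretrained_policy_path: outputs/train/act_so101_lcc/checkpoints/last/pretrained_model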
"""

from __future__ import annotations

import argparse
import copy
import hashlib
import os
import shutil
import subprocess
from pathlib import Path
from typing import Any


DEFAULT_DATASET_REPO = "Smencomojica/robotics_class_2"
DEFAULT_CONFIG_FILE = Path(__file__).with_name("train_configs.yaml")


def default_lerobot_home() -> Path:
    hf_home = Path(os.environ.get("HF_HOME", Path.home() / ".cache" / "huggingface"))
    return Path(os.environ.get("HF_LEROBOT_HOME", hf_home / "lerobot"))


def dataset_local_dir(repo_id: str) -> Path:
    return default_lerobot_home() / repo_id


def default_aggregate_repo_id(dataset_repos: list[str]) -> str:
    digest = hashlib.sha1("\n".join(dataset_repos).encode("utf-8")).hexdigest()[:8]
    return f"local/merged_{digest}"


def looks_downloaded(path: Path) -> bool:
    return (
        path.exists()
        and (path / "meta").exists()
        and any(path.rglob("*.parquet"))
    )


def normalize_dataset_repos(value: Any) -> list[str]:
    return normalize_string_list(value, "dataset_repos")


def normalize_string_list(value: Any, field_name: str) -> list[str]:
    if value is None:
        return []
    if isinstance(value, str):
        return [value]
    if isinstance(value, list) and all(isinstance(item, str) for item in value):
        return value
    raise ValueError(f"`{field_name}` must be a string or a list of strings.")


def pick_device() -> str:
    import torch

    if torch.cuda.is_available():
        return "cuda"
    if getattr(torch.backends, "mps", None) and torch.backends.mps.is_available():
        return "mps"
    return "cpu"


def resolve_config_file(path: str | Path) -> Path:
    config_file = Path(path)
    if config_file.exists():
        return config_file

    script_relative = Path(__file__).parent / config_file
    if script_relative.exists():
        return script_relative

    raise FileNotFoundError(f"Could not find config file: {path}")


def load_sweep_configs(config_file: Path) -> dict[str, Any]:
    try:
        import yaml
    except ModuleNotFoundError as exc:
        raise RuntimeError(
            "PyYAML is required for --config-file support. Install it with `pip install pyyaml`."
        ) from exc

    with config_file.open("r", encoding="utf-8") as f:
        data = yaml.safe_load(f) or {}

    if not isinstance(data, dict):
        raise ValueError(f"{config_file} must contain a YAML mapping.")
    if "configs" not in data or not isinstance(data["configs"], dict):
        raise ValueError(f"{config_file} must define a `configs` mapping.")

    defaults = data.get("defaults", {})
    if defaults is None:
        defaults = {}
    if not isinstance(defaults, dict):
        raise ValueError(f"{config_file} `defaults` must be a mapping.")

    policy_defaults = data.get("policy_defaults", {})
    if policy_defaults is None:
        policy_defaults = {}
    if not isinstance(policy_defaults, dict):
        raise ValueError(f"{config_file} `policy_defaults` must be a mapping.")

    return {
        "defaults": defaults,
        "policy_defaults": policy_defaults,
        "configs": data["configs"],
    }


def deep_merge(base: dict[str, Any], override: dict[str, Any]) -> dict[str, Any]:
    """Return a copy of `base` with `override` merged in; nested mappings merge
    recursively, any other value in `override` replaces the one in `base`."""
    merged = copy.deepcopy(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = copy.deepcopy(value)
    return merged


def config_by_name(config_file: Path, config_name: str) -> dict[str, Any]:
    data = load_sweep_configs(config_file)
    configs = data["configs"]
    if config_name not in configs:
        available = ", ".join(sorted(configs))
        raise ValueError(
            f"Unknown config name: {config_name}. Available configs: {available}"
        )

    selected = configs[config_name]
    if selected is None:
        selected = {}
    if not isinstance(selected, dict):
        raise ValueError(f"Config `{config_name}` must be a mapping.")

    selected_policy = selected.get("policy", {})
    if selected_policy is None:
        selected_policy = {}
    if not isinstance(selected_policy, dict):
        raise ValueError(f"Config `{config_name}` field `policy` must be a mapping.")

    defaults_policy = data["defaults"].get("policy", {})
    if defaults_policy is None:
        defaults_policy = {}
    if not isinstance(defaults_policy, dict):
        raise ValueError(f"{config_file} `defaults.policy` must be a mapping.")

    policy_type = selected_policy.get("type") or defaults_policy.get("type") or "act"
    policy_defaults = data["policy_defaults"].get(policy_type, {})
    if policy_defaults is None:
        policy_defaults = {}
    if not isinstance(policy_defaults, dict):
        raise ValueError(
            f"{config_file} `policy_defaults.{policy_type}` must be a mapping."
        )

    # Precedence: global `defaults` < `policy_defaults[<type>]` < the selected config.
    cfg = deep_merge(data["defaults"], policy_defaults)
    return deep_merge(cfg, selected)


def str_value(value: Any) -> str:
    if isinstance(value, bool):
        return str(value).lower()
    return str(value)


def add_cli_arg(cmd: list[str], key: str, value: Any) -> None:
    if value is None:
        return
    cmd.append(f"--{key}={str_value(value)}")


def looks_like_local_path(value: str) -> bool:
    expanded = Path(value).expanduser()
    if expanded.is_absolute():
        return True
    if value.startswith((".", "~")):
        return True
    # Hub repo ids have at most two path parts ("user/name"), so anything deeper
    # is treated as a relative local path.
    return len(expanded.parts) > 2


def validate_pretrained_policy_path(value: Any) -> str | None:
    if value is None:
        return None
    if not isinstance(value, str):
        raise ValueError("`pretrained_policy_path` must be a string.")

    path_value = value.strip()
    if not path_value:
        raise ValueError("`pretrained_policy_path` must not be empty.")

    # Only local paths are validated here; values that look like hub repo ids are
    # passed through unchecked.
    candidate = Path(path_value).expanduser()
    if candidate.exists() or looks_like_local_path(path_value):
        required_files = ("config.json", "model.safetensors")
        missing = [name for name in required_files if not (candidate / name).is_file()]
        if missing:
            missing_list = ", ".join(missing)
            raise FileNotFoundError(
                f"`pretrained_policy_path` must point to a LeRobot `pretrained_model` "
                f"directory containing {missing_list}: {candidate}"
            )

    return path_value


def apply_cli_overrides(cfg: dict[str, Any], args: argparse.Namespace) -> dict[str, Any]:
    overridden = copy.deepcopy(cfg)
    for key in (
        "dataset_repo",
        "aggregate_repo",
        "output_dir",
        "job_name",
        "steps",
        "batch_size",
        "device",
        "wandb_project",
        "log_freq",
        "save_freq",
        "policy_repo_id",
        "pretrained_policy_path",
    ):
        value = getattr(args, key)
        if value is not None:
            overridden[key] = value
    if args.dataset_repos is not None:
        overridden["dataset_repos"] = args.dataset_repos
    if args.aggregate_drop_features is not None:
        overridden["aggregate_drop_features"] = args.aggregate_drop_features
    return overridden


def download_dataset_if_needed(repo_id: str) -> Path:
    local_dir = dataset_local_dir(repo_id)
    if looks_downloaded(local_dir):
        print(f"Dataset already present: {local_dir}")
        return local_dir

    print(f"Downloading dataset {repo_id} to {local_dir}")
    from huggingface_hub import snapshot_download

    local_dir.parent.mkdir(parents=True, exist_ok=True)
    snapshot_download(
        repo_id=repo_id,
        repo_type="dataset",
        local_dir=str(local_dir),
        local_dir_use_symlinks=False,
    )
    return local_dir


def sanitize_dataset_for_aggregation(
    repo_id: str,
    root: Path,
    drop_features: list[str],
    aggregate_repo: str,
    source_index: int,
) -> tuple[str, Path]:
    if not drop_features:
        return repo_id, root

    from lerobot.datasets.dataset_tools import remove_feature
    from lerobot.datasets.lerobot_dataset import LeRobotDataset

    dataset = LeRobotDataset(repo_id, root=root)
    features_to_drop = [
        feature_name for feature_name in drop_features if feature_name in dataset.meta.features
    ]
    if not features_to_drop:
        return repo_id, root

    sanitized_repo_id = f"{aggregate_repo}_source_{source_index:02d}_sanitized"
    sanitized_root = dataset_local_dir(sanitized_repo_id)

    if looks_downloaded(sanitized_root):
        print(f"Sanitized dataset already present: {sanitized_root}")
        return sanitized_repo_id, sanitized_root

    if sanitized_root.exists():
        raise FileExistsError(
            f"Sanitized dataset path exists but does not look complete: {sanitized_root}. "
            "Remove the incomplete directory or choose a different `aggregate_repo`."
        )

    print(
        f"Creating sanitized copy of {repo_id} without features: "
        + ", ".join(features_to_drop)
    )
    remove_feature(
        dataset=dataset,
        feature_names=features_to_drop,
        output_dir=sanitized_root,
        repo_id=sanitized_repo_id,
    )
    return sanitized_repo_id, sanitized_root


def prepare_training_dataset(cfg: dict[str, Any]) -> tuple[str, Path]:
    dataset_repos = normalize_dataset_repos(cfg.get("dataset_repos"))
    dataset_repo = cfg.get("dataset_repo", DEFAULT_DATASET_REPO)

    if not dataset_repos:
        dataset_repos = [dataset_repo]

    if len(dataset_repos) == 1:
        repo_id = dataset_repos[0]
        return repo_id, download_dataset_if_needed(repo_id)

    source_roots = [download_dataset_if_needed(repo_id) for repo_id in dataset_repos]
    aggregate_repo = cfg.get("aggregate_repo") or default_aggregate_repo_id(dataset_repos)
    aggregate_root = dataset_local_dir(aggregate_repo)
    drop_features = normalize_string_list(
        cfg.get("aggregate_drop_features"), "aggregate_drop_features"
    )

    if looks_downloaded(aggregate_root):
        print(f"Aggregated dataset already present: {aggregate_root}")
        return aggregate_repo, aggregate_root

    if aggregate_root.exists():
        raise FileExistsError(
            f"Aggregate dataset path exists but does not look complete: {aggregate_root}. "
            "Choose a different `aggregate_repo` or remove the incomplete directory."
        )

    aggregate_source_repos: list[str] = []
    aggregate_source_roots: list[Path] = []
    for source_index, (repo_id, root) in enumerate(zip(dataset_repos, source_roots)):
        sanitized_repo_id, sanitized_root = sanitize_dataset_for_aggregation(
            repo_id=repo_id,
            root=root,
            drop_features=drop_features,
            aggregate_repo=aggregate_repo,
            source_index=source_index,
        )
        aggregate_source_repos.append(sanitized_repo_id)
        aggregate_source_roots.append(sanitized_root)

    print("Aggregating datasets:")
    for repo_id in aggregate_source_repos:
        print(f"  {repo_id}")
    print(f"Aggregate repo id: {aggregate_repo}")
    print(f"Aggregate local dir: {aggregate_root}")

    from lerobot.datasets.aggregate import aggregate_datasets

    aggregate_datasets(
        repo_ids=aggregate_source_repos,
        roots=aggregate_source_roots,
        aggr_repo_id=aggregate_repo,
        aggr_root=aggregate_root,
    )
    return aggregate_repo, aggregate_root


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--config-file", default=str(DEFAULT_CONFIG_FILE))
    parser.add_argument("--config-name")
    parser.add_argument("--list-configs", action="store_true")
    parser.add_argument("--dataset-repo", default=None)
    parser.add_argument("--dataset-repos", nargs="+", default=None)
    parser.add_argument("--aggregate-repo", default=None)
    parser.add_argument("--aggregate-drop-features", nargs="+", default=None)
    parser.add_argument("--output-dir", default=None)
    parser.add_argument("--job-name", default=None)
    parser.add_argument("--steps", type=int, default=None)
    parser.add_argument("--batch-size", type=int, default=None)
    parser.add_argument("--device", default=None)
    parser.add_argument("--wandb-project", default=None)
    parser.add_argument("--log-freq", type=int, default=None)
    parser.add_argument("--save-freq", type=int, default=None)
    parser.add_argument("--policy-repo-id", default=None)
    parser.add_argument("--pretrained-policy-path", default=None)
    args = parser.parse_args()

    config_file = resolve_config_file(args.config_file)
    sweep_data = load_sweep_configs(config_file)

    if args.list_configs:
        print("Available configs:")
        for name in sorted(sweep_data["configs"]):
            print(f"  {name}")
        return

    if not args.config_name:
        available = ", ".join(sorted(sweep_data["configs"]))
        parser.error(f"--config-name is required. Available configs: {available}")

    cfg = apply_cli_overrides(config_by_name(config_file, args.config_name), args)

    output_dir = cfg.get("output_dir", "outputs/train/act_so101_lcc")
    job_name = cfg.get("job_name", args.config_name)
    steps = cfg.get("steps", 20_000)
    batch_size = cfg.get("batch_size", 32)
    device = cfg.get("device") or pick_device()
    wandb_project = cfg.get("wandb_project", "lerobot-so101-act-lcc")
    log_freq = cfg.get("log_freq", 100)
    save_freq = cfg.get("save_freq", 4_000)
    policy_repo_id = cfg.get("policy_repo_id")
    pretrained_policy_path = validate_pretrained_policy_path(
        cfg.get("pretrained_policy_path")
    )
    policy_cfg = cfg.get("policy", {})

    if not isinstance(policy_cfg, dict):
        raise ValueError(f"Config `{args.config_name}` field `policy` must be a mapping.")

    print(f"Selected config: {args.config_name}")
    print(f"Selected device: {device}")

    if shutil.which("lerobot-train") is None:
        raise RuntimeError(
            "Could not find `lerobot-train`. Install LeRobot first: pip install lerobot"
        )

    dataset_repo, local_dir = prepare_training_dataset(cfg)

    os.environ.setdefault("WANDB_PROJECT", wandb_project)

    cmd = [
        "lerobot-train",
        f"--dataset.repo_id={dataset_repo}",
        f"--dataset.root={local_dir}",
        f"--output_dir={output_dir}",
        f"--job_name={job_name}",
        f"--policy.device={device}",
        "--wandb.enable=true",
        "--policy.push_to_hub=false",
        f"--steps={steps}",
        f"--batch_size={batch_size}",
        f"--log_freq={log_freq}",
        f"--save_freq={save_freq}",
    ]
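
    # For reference, the final invocation looks roughly like this, with per-policy
    # options from the config appended below as `--policy.<key>=<value>` flags
    # (exact values come from the selected config and any CLI overrides):
    #   lerobot-train --dataset.repo_id=<repo> --dataset.root=<local dir> \
    #       --output_dir=<dir> --job_name=<name> --policy.device=<device> \
    #       --wandb.enable=true --policy.push_to_hub=false --steps=<n> --batch_size=<n> \
    #       --log_freq=<n> --save_freq=<n> --policy.<key>=<value> ...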

    for key, value in policy_cfg.items():
        add_cli_arg(cmd, f"policy.{key}", value)

    if pretrained_policy_path:
        cmd.append(f"--policy.pretrained_path={pretrained_policy_path}")

    if policy_repo_id:
        cmd.append(f"--policy.repo_id={policy_repo_id}")

    print("\nRunning:\n  " + " \\\n  ".join(cmd) + "\n")
    subprocess.run(cmd, check=True)


if __name__ == "__main__":
    main()