|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| import builtins
|
| import datetime as dt
|
| import os
|
| from dataclasses import dataclass, field
|
| from pathlib import Path
|
| from typing import Any
|
|
|
| import draccus
|
| from huggingface_hub import hf_hub_download
|
| from huggingface_hub.errors import HfHubHTTPError
|
|
|
| from lerobot import envs
|
| from lerobot.configs import parser
|
| from lerobot.configs.default import DatasetConfig, EvalConfig, WandBConfig
|
| from lerobot.configs.policies import PreTrainedConfig
|
| from lerobot.optim import OptimizerConfig
|
| from lerobot.optim.schedulers import LRSchedulerConfig
|
| from lerobot.utils.hub import HubMixin
|
|
|
# Canonical filename for a serialized TrainPipelineConfig, both when saving
# (`_save_pretrained`) and when loading from a local dir or the Hub
# (`from_pretrained`).
TRAIN_CONFIG_NAME = "train_config.json"
|
|
|
|
|
@dataclass
class TrainPipelineConfig(HubMixin):
    """Top-level configuration for a policy training run.

    Instances are populated by draccus (CLI/JSON parsing); `HubMixin` adds the
    save/push plumbing so the resolved config can be written to, and restored
    from, a local directory or a HuggingFace Hub repo as ``train_config.json``.
    Call :meth:`validate` after parsing to resolve derived fields and fail fast
    on inconsistent settings.
    """

    # Dataset to train on (required; list-valued repo_id is rejected in validate()).
    dataset: DatasetConfig
    # Simulation environment config; None means no env is configured.
    env: envs.EnvConfig | None = None
    # Policy config; may instead be loaded in validate() from `--policy.path`.
    policy: PreTrainedConfig | None = None

    # Output directory for the run; auto-generated under outputs/train/ when unset.
    output_dir: Path | None = None
    # Run name; derived from env/policy types in validate() when unset.
    job_name: str | None = None

    # Resume an existing run; requires `--config_path` pointing at the saved
    # train_config.json (local paths only — see validate()).
    resume: bool = False

    # RNG seed (None to leave seeding to the caller).
    seed: int | None = 1000

    # Training-loop cadence knobs; interpreted by the training loop (not visible
    # in this file), names suggest step counts/frequencies — confirm with the loop.
    num_workers: int = 4
    batch_size: int = 8
    steps: int = 100_000
    eval_freq: int = 20_000
    log_freq: int = 200
    save_checkpoint: bool = True

    save_freq: int = 20_000
    # When True, optimizer/scheduler are taken from the policy's presets in
    # validate(); when False, both must be provided explicitly.
    use_policy_training_preset: bool = True
    optimizer: OptimizerConfig | None = None
    scheduler: LRSchedulerConfig | None = None
    eval: EvalConfig = field(default_factory=EvalConfig)
    wandb: WandBConfig = field(default_factory=WandBConfig)
    # Filled in validate() when resuming; init=False keeps it off the CLI.
    checkpoint_path: Path | None = field(init=False, default=None)

    # Feature-name remapping; semantics depend on downstream consumers — not
    # visible here.
    rename_map: dict[str, str] = field(default_factory=dict)

    def validate(self) -> None:
        """Resolve derived fields in place and reject inconsistent settings.

        Effects: loads ``self.policy`` from ``--policy.path`` when given (with
        ``--policy.*`` CLI overrides applied on top), wires
        ``checkpoint_path``/``pretrained_path`` when resuming, derives
        ``job_name`` and ``output_dir`` defaults, and applies the policy's
        optimizer/scheduler presets.

        Raises:
            ValueError: missing config_path on resume, missing policy,
                missing optimizer/scheduler, or missing policy.repo_id when
                pushing to the hub.
            NotADirectoryError: resume config_path is not a local path.
            FileExistsError: output_dir exists and resume is False.
            NotImplementedError: dataset.repo_id is a list (multi-dataset).
        """
        # `--policy.path` takes precedence over any inline policy config.
        policy_path = parser.get_path_arg("policy")
        if policy_path:
            cli_overrides = parser.get_cli_overrides("policy")
            self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides)
            self.policy.pretrained_path = Path(policy_path)
        elif self.resume:
            # Resuming requires an explicit path to the saved train config.
            config_path = parser.parse_arg("config_path")
            if not config_path:
                raise ValueError(
                    f"A config_path is expected when resuming a run. Please specify path to {TRAIN_CONFIG_NAME}"
                )

            if not Path(config_path).resolve().exists():
                raise NotADirectoryError(
                    f"{config_path=} is expected to be a local path. "
                    "Resuming from the hub is not supported for now."
                )

            # Assumed checkpoint layout: <checkpoint_path>/<policy_dir>/train_config.json
            # — TODO confirm against the checkpoint writer.
            policy_dir = Path(config_path).parent
            if self.policy is not None:
                self.policy.pretrained_path = policy_dir
            self.checkpoint_path = policy_dir.parent

        if self.policy is None:
            raise ValueError(
                "Policy is not configured. Please specify a pretrained policy with `--policy.path`."
            )

        # Default job name: "<env type>_<policy type>", or just the policy type
        # when no env is configured.
        if not self.job_name:
            if self.env is None:
                self.job_name = f"{self.policy.type}"
            else:
                self.job_name = f"{self.env.type}_{self.policy.type}"

        # Refuse to clobber a previous run's outputs unless explicitly resuming.
        if not self.resume and isinstance(self.output_dir, Path) and self.output_dir.is_dir():
            raise FileExistsError(
                f"Output directory {self.output_dir} already exists and resume is {self.resume}. "
                f"Please change your output directory so that {self.output_dir} is not overwritten."
            )
        elif not self.output_dir:
            # Timestamped default keeps successive runs in separate directories.
            now = dt.datetime.now()
            train_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}"
            self.output_dir = Path("outputs/train") / train_dir

        if isinstance(self.dataset.repo_id, list):
            raise NotImplementedError("LeRobotMultiDataset is not currently implemented.")

        # Either both optimizer and scheduler are given explicitly, or they come
        # from the policy presets. Preset application is skipped when resuming.
        if not self.use_policy_training_preset and (self.optimizer is None or self.scheduler is None):
            raise ValueError("Optimizer and Scheduler must be set when the policy presets are not used.")
        elif self.use_policy_training_preset and not self.resume:
            self.optimizer = self.policy.get_optimizer_preset()
            self.scheduler = self.policy.get_scheduler_preset()

        if self.policy.push_to_hub and not self.policy.repo_id:
            raise ValueError(
                "'policy.repo_id' argument missing. Please specify it to push the model to the hub."
            )

    @classmethod
    def __get_path_fields__(cls) -> list[str]:
        """This enables the parser to load config from the policy using `--policy.path=local/dir`"""
        return ["policy"]

    def to_dict(self) -> dict[str, Any]:
        """Serialize this config to a plain dict via draccus' encoder."""
        return draccus.encode(self)

    def _save_pretrained(self, save_directory: Path) -> None:
        """Write this config as JSON (``train_config.json``) into `save_directory`.

        Called by the HubMixin save/push machinery.
        """
        with open(save_directory / TRAIN_CONFIG_NAME, "w") as f, draccus.config_type("json"):
            draccus.dump(self, f, indent=4)

    @classmethod
    def from_pretrained(
        cls: builtins.type["TrainPipelineConfig"],
        pretrained_name_or_path: str | Path,
        *,
        force_download: bool = False,
        resume_download: bool | None = None,
        proxies: dict[Any, Any] | None = None,
        token: str | bool | None = None,
        cache_dir: str | Path | None = None,
        local_files_only: bool = False,
        revision: str | None = None,
        **kwargs: Any,
    ) -> "TrainPipelineConfig":
        """Load a config from a local directory, a local file, or a Hub repo.

        Args:
            pretrained_name_or_path: directory containing ``train_config.json``,
                a direct path to the JSON file, or a HuggingFace Hub repo id.
            force_download / resume_download / proxies / token / cache_dir /
                local_files_only / revision: forwarded to `hf_hub_download`
                when fetching from the Hub.
            **kwargs: ``cli_args`` (list of CLI-style overrides) is consumed
                and forwarded to `draccus.parse`; other keys are ignored.

        Raises:
            FileNotFoundError: the config could not be fetched from the Hub.
        """
        model_id = str(pretrained_name_or_path)
        config_file: str | None = None
        if Path(model_id).is_dir():
            if TRAIN_CONFIG_NAME in os.listdir(model_id):
                config_file = os.path.join(model_id, TRAIN_CONFIG_NAME)
            else:
                # NOTE(review): uses print rather than logging; consider a logger.
                print(f"{TRAIN_CONFIG_NAME} not found in {Path(model_id).resolve()}")
        elif Path(model_id).is_file():
            config_file = model_id
        else:
            # Not a local path: treat `model_id` as a Hub repo id.
            try:
                config_file = hf_hub_download(
                    repo_id=model_id,
                    filename=TRAIN_CONFIG_NAME,
                    revision=revision,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    token=token,
                    local_files_only=local_files_only,
                )
            except HfHubHTTPError as e:
                raise FileNotFoundError(
                    f"{TRAIN_CONFIG_NAME} not found on the HuggingFace Hub in {model_id}"
                ) from e

        # CLI overrides (if any) are applied on top of the file's values.
        cli_args = kwargs.pop("cli_args", [])
        with draccus.config_type("json"):
            return draccus.parse(cls, config_file, args=cli_args)
|
|
|
|
|
@dataclass(kw_only=True)
class TrainRLServerPipelineConfig(TrainPipelineConfig):
    """Training config variant for the RL server pipeline.

    Identical to `TrainPipelineConfig` except that `dataset` is optional —
    presumably because data is gathered online rather than read from a fixed
    dataset; confirm against the RL server training code.
    """

    # Override: dataset becomes optional. kw_only=True allows this defaulted
    # field to coexist with the base class's required `dataset` field.
    dataset: DatasetConfig | None = None
|
|
|