# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import importlib.util
import os
import random
import subprocess
import sys
from time import sleep, time
import ray
import util
from ray import air, tune
from ray.tune import Callback
from ray.tune.progress_reporter import ProgressReporter
from ray.tune.search.optuna import OptunaSearch
from ray.tune.search.repeater import Repeater
from ray.tune.stopper import CombinedStopper
"""
This script breaks down an aggregate tuning job, as defined by a hyperparameter sweep configuration,
into individual jobs (shell commands) to run on the GPU-enabled nodes of the cluster.
By default, one worker is created for each GPU-enabled node in the cluster for each individual job.
To use more than one worker per node (likely the case for multi-GPU machines), supply the
num_workers_per_node argument.
Each hyperparameter sweep configuration should include the workflow,
runner arguments, and hydra arguments to vary.
This assumes that all workers in a cluster are homogeneous. For heterogeneous workloads,
create several heterogeneous clusters (with homogeneous nodes in each cluster),
then submit several overall-cluster jobs with :file:`../submit_job.py`.
KubeRay clusters on Google GKE can be created with :file:`../launch.py`.
To report tune metrics on clusters, a running MLFlow server with a known URI that the cluster has
access to is required. For KubeRay clusters configured with :file:`../launch.py`, this is included
automatically, and can be easily found with :file:`grok_cluster_with_kubectl.py`.
Usage:
.. code-block:: bash
./isaaclab.sh -p scripts/reinforcement_learning/ray/tuner.py -h
# Examples
# Local
./isaaclab.sh -p scripts/reinforcement_learning/ray/tuner.py --run_mode local \
--cfg_file scripts/reinforcement_learning/ray/hyperparameter_tuning/vision_cartpole_cfg.py \
--cfg_class CartpoleTheiaJobCfg
# Local with a custom progress reporter
./isaaclab.sh -p scripts/reinforcement_learning/ray/tuner.py \
--cfg_file scripts/reinforcement_learning/ray/hyperparameter_tuning/vision_cartpole_cfg.py \
--cfg_class CartpoleTheiaJobCfg \
--progress_reporter CustomCartpoleProgressReporter
# Remote (run grok cluster or create config file mentioned in :file:`submit_job.py`)
./isaaclab.sh -p scripts/reinforcement_learning/ray/submit_job.py \
--aggregate_jobs tuner.py \
--cfg_file hyperparameter_tuning/vision_cartpole_cfg.py \
--cfg_class CartpoleTheiaJobCfg --mlflow_uri <MLFLOW_URI_FROM_GROK_OR_MANUAL>
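
A custom progress reporter passed via ``--progress_reporter`` must be defined in the cfg_file
and subclass ``ray.tune.ProgressReporter``. A minimal sketch (the class name and metric columns
below are illustrative assumptions, not shipped with this script):

.. code-block:: python

    from ray.tune import CLIReporter

    class CustomCartpoleProgressReporter(CLIReporter):
        def __init__(self):
            # Leave metric/mode unset here; tuner.py supplies them via --metric/--mode.
            super().__init__(metric_columns=["rewards/time", "training_iteration"])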
"""
DOCKER_PREFIX = "/workspace/isaaclab/"
BASE_DIR = os.path.expanduser("~")
PYTHON_EXEC = "./isaaclab.sh -p"
WORKFLOW = "scripts/reinforcement_learning/rl_games/train.py"
NUM_WORKERS_PER_NODE = 1 # needed for local parallelism
PROCESS_RESPONSE_TIMEOUT = 200.0 # seconds to wait before killing the process when it stops responding
MAX_LINES_TO_SEARCH_EXPERIMENT_LOGS = 1000 # maximum number of lines to read from the training process logs
MAX_LOG_EXTRACTION_ERRORS = 10 # maximum allowed LogExtractionErrors before we abort the whole training
class IsaacLabTuneTrainable(tune.Trainable):
"""The Isaac Lab Ray Tune Trainable.
This class uses the standalone workflows to start jobs, along with the hydra integration.
This class achieves Ray-based logging through reading the tensorboard logs from
the standalone workflows. This depends on a config generated in the format of
:class:`JobCfg`
"""
def setup(self, config: dict) -> None:
"""Get the invocation command, return quick for easy scheduling."""
self.data = None
self.time_since_last_proc_response = 0.0
self.invoke_cmd = util.get_invocation_command_from_cfg(cfg=config, python_cmd=PYTHON_EXEC, workflow=WORKFLOW)
print(f"[INFO]: Recovered invocation with {self.invoke_cmd}")
self.experiment = None
def reset_config(self, new_config: dict):
"""Allow environments to be reused by fetching a new invocation command"""
self.setup(new_config)
return True
def step(self) -> dict:
if self.experiment is None: # start experiment
# When including this as first step instead of setup, experiments get scheduled faster
# Don't want to block the scheduler while the experiment spins up
print(f"[INFO]: Invoking experiment as first step with {self.invoke_cmd}...")
try:
experiment = util.execute_job(
self.invoke_cmd,
identifier_string="",
extract_experiment=True, # Keep this as True to return a valid dictionary
persistent_dir=BASE_DIR,
max_lines_to_search_logs=MAX_LINES_TO_SEARCH_EXPERIMENT_LOGS,
max_time_to_search_logs=PROCESS_RESPONSE_TIMEOUT,
)
except util.LogExtractionError:
self.data = {
"LOG_EXTRACTION_ERROR_STOPPER_FLAG": True,
"done": True,
}
return self.data
self.experiment = experiment
print(f"[INFO]: Tuner recovered experiment info {experiment}")
self.proc = experiment["proc"]
self.experiment_name = experiment["experiment_name"]
self.isaac_logdir = experiment["logdir"]
self.tensorboard_logdir = self.isaac_logdir + "/" + self.experiment_name
self.done = False
if self.proc is None:
raise ValueError("Could not start trial.")
proc_status = self.proc.poll()
if proc_status is not None: # process finished, signal finish
            if self.data is None:  # guard: the process may exit before any metrics were read
                self.data = {}
            self.data["done"] = True
print(f"[INFO]: Process finished with {proc_status}, returning...")
else: # wait until the logs are ready or fresh
data = util.load_tensorboard_logs(self.tensorboard_logdir)
while data is None:
data = util.load_tensorboard_logs(self.tensorboard_logdir)
proc_status = self.proc.poll()
if proc_status is not None:
break
sleep(2) # Lazy report metrics to avoid performance overhead
if self.data is not None:
data_ = {k: v for k, v in data.items() if k != "done"}
self_data_ = {k: v for k, v in self.data.items() if k != "done"}
unresponsiveness_start_time = time()
while util._dicts_equal(data_, self_data_):
self.time_since_last_proc_response = time() - unresponsiveness_start_time
data = util.load_tensorboard_logs(self.tensorboard_logdir)
data_ = {k: v for k, v in data.items() if k != "done"}
proc_status = self.proc.poll()
if proc_status is not None:
break
if self.time_since_last_proc_response > PROCESS_RESPONSE_TIMEOUT:
self.time_since_last_proc_response = 0.0
print("[WARNING]: Training workflow process is not responding, terminating...")
self.proc.terminate()
try:
self.proc.wait(timeout=20)
except subprocess.TimeoutExpired:
print("[ERROR]: The process did not terminate within timeout duration.")
self.proc.kill()
self.proc.wait()
self.data = data
self.data["done"] = True
return self.data
sleep(2) # Lazy report metrics to avoid performance overhead
self.data = data
self.data["done"] = False
return self.data
    def default_resource_request(self):
        """How many resources each trainable uses. Assumes homogeneous resources across GPU nodes,
        and that each trainable is meant for one node, where it uses all available resources."""
resources = util.get_gpu_node_resources(one_node_only=True)
if NUM_WORKERS_PER_NODE != 1:
print("[WARNING]: Splitting node into more than one worker")
return tune.PlacementGroupFactory(
[{"CPU": resources["CPU"] / NUM_WORKERS_PER_NODE, "GPU": resources["GPU"] / NUM_WORKERS_PER_NODE}],
strategy="STRICT_PACK",
)
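
    # For example (illustrative numbers): on a node reporting {"CPU": 16, "GPU": 2} with
    # --num_workers_per_node 2, each trial would be placed with {"CPU": 8.0, "GPU": 1.0}.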
class LogExtractionErrorStopper(tune.Stopper):
"""Stopper that stops all trials if multiple LogExtractionErrors occur.
Args:
max_errors: The maximum number of LogExtractionErrors allowed before terminating the experiment.
"""
def __init__(self, max_errors: int):
self.max_errors = max_errors
self.error_count = 0
def __call__(self, trial_id, result):
"""Increments the error count if trial has encountered a LogExtractionError.
It does not stop the trial based on the metrics, always returning False.
"""
if result.get("LOG_EXTRACTION_ERROR_STOPPER_FLAG", False):
self.error_count += 1
print(
f"[ERROR]: Encountered LogExtractionError {self.error_count} times. "
f"Maximum allowed is {self.max_errors}."
)
return False
def stop_all(self):
"""Returns true if number of LogExtractionErrors exceeds the maximum allowed, terminating the experiment."""
if self.error_count > self.max_errors:
print("[FATAL]: Encountered LogExtractionError more than allowed, aborting entire tuning run... ")
return True
else:
return False
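

# A custom stopper passed via --stopper must be defined in the cfg_file and subclass
# tune.Stopper. A minimal sketch (the class name and the 900.0 threshold are illustrative
# assumptions; "rewards/time" matches this script's default --metric):
#
#     class RewardThresholdStopper(tune.Stopper):
#         def __call__(self, trial_id, result):
#             # Stop an individual trial once its reward metric crosses the threshold.
#             return result.get("rewards/time", float("-inf")) >= 900.0
#
#         def stop_all(self):
#             # Never stop the whole experiment based on this criterion alone.
#             return False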
class ProcessCleanupCallback(Callback):
"""Callback to clean up processes when trials are stopped."""
def on_trial_error(self, iteration, trials, trial, error, **info):
"""Called when a trial encounters an error."""
self._cleanup_trial(trial)
def on_trial_complete(self, iteration, trials, trial, **info):
"""Called when a trial completes."""
self._cleanup_trial(trial)
def _cleanup_trial(self, trial):
"""Clean up processes for a trial using SIGKILL."""
try:
subprocess.run(["pkill", "-9", "-f", f"rid {trial.config['runner_args']['-rid']}"], check=False)
sleep(5)
except Exception as e:
print(f"[ERROR]: Failed to cleanup trial {trial.trial_id}: {e}")
def invoke_tuning_run(
cfg: dict,
args: argparse.Namespace,
progress_reporter: ProgressReporter | None = None,
stopper: tune.Stopper | None = None,
) -> None:
"""Invoke an Isaac-Ray tuning run.
Log either to a local directory or to MLFlow.
Args:
cfg: Configuration dictionary extracted from job setup
args: Command-line arguments related to tuning.
progress_reporter: Custom progress reporter. Defaults to CLIReporter or JupyterNotebookReporter if not provided.
stopper: Custom stopper, optional.
"""
# Allow for early exit
os.environ["TUNE_DISABLE_STRICT_METRIC_CHECKING"] = "1"
print("[WARNING]: Not saving checkpoints, just running experiment...")
print("[INFO]: Model parameters and metrics will be preserved.")
print("[WARNING]: For homogeneous cluster resources only...")
# Initialize Ray
util.ray_init(
ray_address=args.ray_address,
log_to_driver=True,
)
# Get available resources
resources = util.get_gpu_node_resources()
print(f"[INFO]: Available resources {resources}")
print(f"[INFO]: Using config {cfg}")
# Configure the search algorithm and the repeater
searcher = OptunaSearch(
metric=args.metric,
mode=args.mode,
)
repeat_search = Repeater(searcher, repeat=args.repeat_run_count)
# Configure the stoppers
    stoppers: CombinedStopper = CombinedStopper(
        LogExtractionErrorStopper(max_errors=MAX_LOG_EXTRACTION_ERRORS),
        *([stopper] if stopper is not None else []),
    )
if progress_reporter is not None:
os.environ["RAY_AIR_NEW_OUTPUT"] = "0"
if (
getattr(progress_reporter, "_metric", None) is not None
or getattr(progress_reporter, "_mode", None) is not None
):
raise ValueError(
"Do not set <metric> or <mode> directly in the custom progress reporter class, "
"provide them as arguments to tuner.py instead."
)
if args.run_mode == "local": # Standard config, to file
run_config = air.RunConfig(
storage_path="/tmp/ray",
name=f"IsaacRay-{args.cfg_class}-tune",
callbacks=[ProcessCleanupCallback()],
verbose=1,
checkpoint_config=air.CheckpointConfig(
checkpoint_frequency=0, # Disable periodic checkpointing
checkpoint_at_end=False, # Disable final checkpoint
),
stop=stoppers,
progress_reporter=progress_reporter,
)
elif args.run_mode == "remote": # MLFlow, to MLFlow server
mlflow_callback = MLflowLoggerCallback(
tracking_uri=args.mlflow_uri,
experiment_name=f"IsaacRay-{args.cfg_class}-tune",
save_artifact=False,
tags={"run_mode": "remote", "cfg_class": args.cfg_class},
)
run_config = ray.train.RunConfig(
name="mlflow",
storage_path="/tmp/ray",
callbacks=[ProcessCleanupCallback(), mlflow_callback],
checkpoint_config=ray.train.CheckpointConfig(checkpoint_frequency=0, checkpoint_at_end=False),
stop=stoppers,
progress_reporter=progress_reporter,
)
else:
raise ValueError("Unrecognized run mode.")
    # The run ID (-rid) is not a tuned hyperparameter; it is sampled so that each trial gets a
    # unique tag that ProcessCleanupCallback can later use to kill the matching process.
cfg["runner_args"]["-rid"] = tune.sample_from(lambda _: str(random.randint(int(1e9), int(1e10) - 1)))
# Configure the tuning job
tuner = tune.Tuner(
IsaacLabTuneTrainable,
param_space=cfg,
tune_config=tune.TuneConfig(
metric=args.metric,
mode=args.mode,
search_alg=repeat_search,
num_samples=args.num_samples,
reuse_actors=True,
),
run_config=run_config,
)
# Execute the tuning
tuner.fit()
# Save results to mounted volume
if args.run_mode == "local":
print("[DONE!]: Check results with tensorboard dashboard")
else:
print("[DONE!]: Check results with MLFlow dashboard")
class JobCfg:
    """To be compatible with :meth:`invoke_tuning_run` and :class:`IsaacLabTuneTrainable`,
    at a minimum, the tune job should inherit from this class."""
def __init__(self, cfg: dict):
"""
Runner args include command line arguments passed to the task.
For example:
cfg["runner_args"]["headless_singleton"] = "--headless"
cfg["runner_args"]["enable_cameras_singleton"] = "--enable_cameras"
"""
assert "runner_args" in cfg, "No runner arguments specified."
"""
Task is the desired task to train on. For example:
cfg["runner_args"]["--task"] = tune.choice(["Isaac-Cartpole-RGB-TheiaTiny-v0"])
"""
assert "--task" in cfg["runner_args"], "No task specified."
"""
Hydra args define the hyperparameters varied within the sweep. For example:
cfg["hydra_args"]["agent.params.network.cnn.activation"] = tune.choice(["relu", "elu"])
"""
assert "hydra_args" in cfg, "No hyperparameters specified."
self.cfg = cfg
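

# A minimal sweep config subclass might look like the following sketch (the class name, task,
# and hyperparameter choices are illustrative, drawn from the docstring examples above):
#
#     class CartpoleTheiaJobCfg(JobCfg):
#         def __init__(self):
#             cfg = {"runner_args": {}, "hydra_args": {}}
#             cfg["runner_args"]["headless_singleton"] = "--headless"
#             cfg["runner_args"]["enable_cameras_singleton"] = "--enable_cameras"
#             cfg["runner_args"]["--task"] = tune.choice(["Isaac-Cartpole-RGB-TheiaTiny-v0"])
#             cfg["hydra_args"]["agent.params.network.cnn.activation"] = tune.choice(["relu", "elu"])
#             super().__init__(cfg)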
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Tune Isaac Lab hyperparameters.")
parser.add_argument("--ray_address", type=str, default="auto", help="the Ray address.")
parser.add_argument(
"--cfg_file",
type=str,
default="hyperparameter_tuning/vision_cartpole_cfg.py",
required=False,
help="The relative filepath where a hyperparameter sweep is defined",
)
parser.add_argument(
"--cfg_class",
type=str,
default="CartpoleRGBNoTuneJobCfg",
required=False,
help="Name of the hyperparameter sweep class to use",
)
parser.add_argument(
"--run_mode",
choices=["local", "remote"],
default="remote",
        help=(
            "Set to local to use ./isaaclab.sh -p, set to "
            "remote to use /workspace/isaaclab/isaaclab.sh -p"
        ),
)
parser.add_argument(
"--workflow",
default=None, # populated with RL Games
help="The absolute path of the workflow to use for the experiment. By default, RL Games is used.",
)
parser.add_argument(
"--mlflow_uri",
type=str,
default=None,
required=False,
        help="The MLflow URI.",
)
parser.add_argument(
"--num_workers_per_node",
type=int,
default=1,
        help="Number of workers to run on each GPU node. Only supply for parallelism on multi-GPU nodes.",
)
parser.add_argument("--metric", type=str, default="rewards/time", help="What metric to tune for.")
parser.add_argument(
"--mode",
choices=["max", "min"],
default="max",
        help="Whether to maximize or minimize the metric while tuning.",
)
parser.add_argument(
"--num_samples",
type=int,
default=100,
help="How many hyperparameter runs to try total.",
)
parser.add_argument(
"--repeat_run_count",
type=int,
default=3,
help="How many times to repeat each hyperparameter config.",
)
parser.add_argument(
"--process_response_timeout",
type=float,
default=PROCESS_RESPONSE_TIMEOUT,
help="Training workflow process response timeout.",
)
parser.add_argument(
"--max_lines_to_search_experiment_logs",
        type=int,
default=MAX_LINES_TO_SEARCH_EXPERIMENT_LOGS,
help="Max number of lines to search for experiment logs before terminating the training workflow process.",
)
parser.add_argument(
"--max_log_extraction_errors",
        type=int,
        default=MAX_LOG_EXTRACTION_ERRORS,
        help="Max number of LogExtractionError failures before we abort the whole tuning run.",
)
parser.add_argument(
"--progress_reporter",
type=str,
default=None,
help=(
"Optional: name of a custom reporter class defined in the cfg_file. "
"Must subclass ray.tune.ProgressReporter "
"(e.g., CustomCartpoleProgressReporter)."
),
)
parser.add_argument(
"--stopper",
type=str,
default=None,
        help="Optional: name of a stop criterion class defined in the cfg_file; must subclass tune.Stopper.",
)
args = parser.parse_args()
PROCESS_RESPONSE_TIMEOUT = args.process_response_timeout
MAX_LINES_TO_SEARCH_EXPERIMENT_LOGS = int(args.max_lines_to_search_experiment_logs)
print(
"[INFO]: The max number of lines to search for experiment logs before (early) terminating the training "
f"workflow process is set to {MAX_LINES_TO_SEARCH_EXPERIMENT_LOGS}.\n"
"[INFO]: The process response timeout, used while updating tensorboard scalars and searching for "
f"experiment logs, is set to {PROCESS_RESPONSE_TIMEOUT} seconds."
)
MAX_LOG_EXTRACTION_ERRORS = int(args.max_log_extraction_errors)
print(
"[INFO]: Max number of LogExtractionError failures before we abort the whole tuning run is "
f"set to {MAX_LOG_EXTRACTION_ERRORS}.\n"
)
NUM_WORKERS_PER_NODE = args.num_workers_per_node
print(f"[INFO]: Using {NUM_WORKERS_PER_NODE} workers per node.")
if args.run_mode == "remote":
BASE_DIR = DOCKER_PREFIX # ensure logs are dumped to persistent location
PYTHON_EXEC = DOCKER_PREFIX + PYTHON_EXEC[2:]
if args.workflow is None:
WORKFLOW = DOCKER_PREFIX + WORKFLOW
else:
WORKFLOW = args.workflow
print(f"[INFO]: Using remote mode {PYTHON_EXEC=} {WORKFLOW=}")
if args.mlflow_uri is not None:
import mlflow
mlflow.set_tracking_uri(args.mlflow_uri)
from ray.air.integrations.mlflow import MLflowLoggerCallback
else:
            raise ValueError("Please provide an MLflow server URI via --mlflow_uri for remote runs.")
else: # local
PYTHON_EXEC = os.getcwd() + "/" + PYTHON_EXEC[2:]
if args.workflow is None:
WORKFLOW = os.getcwd() + "/" + WORKFLOW
else:
WORKFLOW = args.workflow
BASE_DIR = os.getcwd()
print(f"[INFO]: Using local mode {PYTHON_EXEC=} {WORKFLOW=}")
file_path = args.cfg_file
class_name = args.cfg_class
print(f"[INFO]: Attempting to use sweep config from {file_path=} {class_name=}")
module_name = os.path.splitext(os.path.basename(file_path))[0]
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
print(f"[INFO]: Successfully imported {module_name} from {file_path}")
if hasattr(module, class_name):
ClassToInstantiate = getattr(module, class_name)
print(f"[INFO]: Found correct class {ClassToInstantiate}")
instance = ClassToInstantiate()
print(f"[INFO]: Successfully instantiated class '{class_name}' from {file_path}")
cfg = instance.cfg
print(f"[INFO]: Grabbed the following hyperparameter sweep config: \n {cfg}")
# Load optional stopper config
        stopper = None
        if args.stopper:
            if not hasattr(module, args.stopper):
                raise AttributeError(f"[ERROR]: Stopper '{args.stopper}' not found in {file_path}")
            stopper = getattr(module, args.stopper)
            if isinstance(stopper, type) and issubclass(stopper, tune.Stopper):
                stopper = stopper()
            else:
                raise TypeError(f"[ERROR]: Unsupported stop criteria type: {type(stopper)}")
            print(f"[INFO]: Loaded custom stop criteria from '{args.stopper}'")
# Load optional progress reporter config
        progress_reporter = None
        if args.progress_reporter:
            if not hasattr(module, args.progress_reporter):
                raise AttributeError(
                    f"[ERROR]: Progress reporter '{args.progress_reporter}' not found in {file_path}"
                )
            progress_reporter = getattr(module, args.progress_reporter)
            if isinstance(progress_reporter, type) and issubclass(progress_reporter, tune.ProgressReporter):
                progress_reporter = progress_reporter()
            else:
                raise TypeError(f"[ERROR]: {args.progress_reporter} is not a valid ProgressReporter.")
            print(f"[INFO]: Loaded custom progress reporter from '{args.progress_reporter}'")
invoke_tuning_run(cfg, args, progress_reporter=progress_reporter, stopper=stopper)
else:
        raise AttributeError(f"[ERROR]: Class '{class_name}' not found in {file_path}")